annotate src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp @ 453:c96030fff130
6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
author:   ysr
date:     Thu, 20 Nov 2008 16:56:09 -0800
parents:  850fdf70db2b
children: 98cb887364d3
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_cardTableExtension.cpp.incl"

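// Background: a card table maps each fixed-size, aligned chunk of the heap
// (a "card", 512 bytes with the default card size) to one byte of mark
// state, so a scavenge only needs to scan cards that may hold old-to-young
// pointers. Besides the usual clean and dirty states, this extension uses a
// "newgen" (youngergen) mark for cards known to contain old-to-young
// pointers, and a "verify" mark used only by the verification code below.
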
// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;
  jbyte*              _unmarked_card;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = (CardTableExtension*)heap->barrier_set();
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mishmash of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.
void CardTableExtension::scavenge_contents(ObjectStartArray* start_array,
                                           MutableSpace* sp,
                                           HeapWord* space_top,
                                           PSPromotionManager* pm)
{
  assert(start_array != NULL && sp != NULL && pm != NULL, "Sanity");
  assert(start_array->covered_region().contains(sp->used_region()),
         "ObjectStartArray does not cover space");
  bool depth_first = pm->depth_first();

  if (sp->not_empty()) {
    oop* sp_top = (oop*)space_top;
    oop* prev_top = NULL;
    jbyte* current_card = byte_for(sp->bottom());
    jbyte* end_card = byte_for(sp_top - 1);    // sp_top is exclusive
    // scan card marking array
    while (current_card <= end_card) {
      jbyte value = *current_card;
      // skip clean cards
      if (card_is_clean(value)) {
        current_card++;
      } else {
        // we found a non-clean card
        jbyte* first_nonclean_card = current_card++;
        oop* bottom = (oop*)addr_for(first_nonclean_card);
        // find object starting on card
        oop* bottom_obj = (oop*)start_array->object_start((HeapWord*)bottom);
        assert(bottom_obj <= bottom, "just checking");
        // make sure we don't scan oops we already looked at
        if (bottom < prev_top) bottom = prev_top;
        // figure out when to stop scanning
        jbyte* first_clean_card;
        oop* top;
        bool restart_scanning;
        do {
          restart_scanning = false;
          // find a clean card
          while (current_card <= end_card) {
            value = *current_card;
            if (card_is_clean(value)) break;
            current_card++;
          }
          // check if we reached the end, if so we are done
          if (current_card >= end_card) {
            first_clean_card = end_card + 1;
            current_card++;
            top = sp_top;
          } else {
            // we have a clean card, find object starting on that card
            first_clean_card = current_card++;
            top = (oop*)addr_for(first_clean_card);
            oop* top_obj = (oop*)start_array->object_start((HeapWord*)top);
            assert(top_obj <= top, "just checking");
            if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
              // an arrayOop is starting on the clean card - since we do exact store
              // checks for objArrays we are done
            } else {
              // otherwise, it is possible that the object starting on the clean card
              // spans the entire card, and that the store happened on a later card.
              // figure out where the object ends
              top = top_obj + oop(top_obj)->size();
              jbyte* top_card = CardTableModRefBS::byte_for(top - 1);   // top is exclusive
              if (top_card > first_clean_card) {
                // object ends on a different card
                current_card = top_card + 1;
                if (card_is_clean(*top_card)) {
                  // the ending card is clean, we are done
                  first_clean_card = top_card;
                } else {
                  // the ending card is not clean, continue scanning at start of do-while
                  restart_scanning = true;
                }
              } else {
                // object ends on the clean card, we are done.
                assert(first_clean_card == top_card, "just checking");
              }
            }
          }
        } while (restart_scanning);
        // we know which cards to scan, now clear them
        while (first_nonclean_card < first_clean_card) {
          *first_nonclean_card++ = clean_card;
        }
        // scan oops in objects
        // hoisted the if (depth_first) check out of the loop
        if (depth_first) {
          do {
            oop(bottom_obj)->push_contents(pm);
            bottom_obj += oop(bottom_obj)->size();
            assert(bottom_obj <= sp_top, "just checking");
          } while (bottom_obj < top);
          pm->drain_stacks_cond_depth();
        } else {
          do {
            oop(bottom_obj)->copy_contents(pm);
            bottom_obj += oop(bottom_obj)->size();
            assert(bottom_obj <= sp_top, "just checking");
          } while (bottom_obj < top);
        }
        // remember top oop* scanned
        prev_top = top;
      }
    }
  }
}

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number) {
  int ssize = 128; // Naked constant!  Work unit = 64k.
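  // With the default 512-byte card size, a stripe of 128 card entries covers
  // 128 * 512 bytes = 64k of heap, which is where the "64k" above comes from.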
  int dirty_card_count = 0;
  bool depth_first = pm->depth_first();

  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
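  // Stripes are interleaved round-robin across the workers: the worker with
  // stripe_number s scans cards [start + s*ssize, start + (s+1)*ssize), then
  // skips ahead by ssize*ParallelGCThreads. For example (illustrative numbers
  // only), with 4 GC threads worker 1 gets cards 128..255, 640..767, and so on.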
  for (jbyte* slice = start_card; slice < end_card; slice += ssize*ParallelGCThreads) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
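          //
          // Illustrative scenario: one object spans cards c0..c4 with only c1
          // and c3 dirty. Extending current_card past the object's ending card
          // folds c3 into the same dirty region as c1, so the object is
          // scanned (and its cards cleaned) once rather than revisited for c3.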
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card").  Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
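        // The first and last cards of the worker's range are deliberately left
        // uncleaned below, presumably because cards at stripe boundaries can be
        // shared with objects handled by adjacent stripes.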
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
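        // Prefetching this many bytes ahead of the scan pointer hides cache
        // misses while the objects are walked; an interval of 0 disables
        // prefetching and takes the plain loops below.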
        // scan all objects in the range
        if (interval != 0) {
          // hoisted the if (depth_first) check out of the loop
          if (depth_first) {
            while (p < to) {
              Prefetch::write(p, interval);
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->push_contents(pm);
              p += m->size();
            }
            pm->drain_stacks_cond_depth();
          } else {
            while (p < to) {
              Prefetch::write(p, interval);
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->copy_contents(pm);
              p += m->size();
            }
          }
        } else {
          // hoisted the if (depth_first) check out of the loop
          if (depth_first) {
            while (p < to) {
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->push_contents(pm);
              p += m->size();
            }
            pm->drain_stacks_cond_depth();
          } else {
            while (p < to) {
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->copy_contents(pm);
              p += m->size();
            }
          }
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card" or
      // the current_card is >= the worker_end_card so the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  old_gen->object_iterate(&check);
  perm_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set());

  old_gen->oop_iterate(&check);
  perm_gen->oop_iterate(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
  verify_all_young_refs_precise_helper(perm_gen->object_space()->used_region());
}

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set();
  // FIX ME ASSERT HERE

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

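// The predicates below decode the card states: dirty and newgen both imply
// a possible old-to-young pointer somewhere on the card (imprecise), while
// only newgen and verify count as precise marks.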
bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
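//
// For example (hypothetical addresses): if a covered region [0x1000, 0x3000)
// shrinks to [0x1800, 0x3000), the ends match and
// resize_covered_region_by_end() handles it; if it instead becomes
// [0x1000, 0x2800), the starts match and resize_covered_region_by_start()
// is used.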
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region. The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
         "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  resize_commit_uncommit(changed_region, new_region);

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Set the new start of the committed region
  resize_update_committed_table(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

void CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start) but assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                    |+ guard +|
    //                    |+ cur committed +++++++++|
    //          |+ new committed +++++++++++++++++++|
    //
    // Case B
    //                                    |+ guard +|
    //              |+ cur committed +|
    //          |+ new committed +++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                    |+ guard +|
    //              |+ cur committed +|
    //          |+ new committed +++++++++++++++++++|
    // Case D
    //                                    |+ guard +|
    //              |+ cur committed +++++++++++++++|
    //          |+ new committed +++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size())) {
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
    }
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        vm_exit_out_of_memory(uncommit_region.byte_size(),
                              "card table contraction");
      }
    }
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries. Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // reorder regions. There should only be at most 1 out
  // of order.
  for (int i = _cur_covered_regions-1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region.  If none, return start of target region.
//
//      -------------
//      |           |
//      -------------
//            ------------
//            | target   |
//            ------------
//                         -------------
//                         |           |
//                         -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                   ------------
//                   | target   |
//                   ------------
//                                -------------
//                                |           |
//                                -------------
//                   ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}