src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
changeset 235:9c2ecc2ffb12 (jdk7-b31) -- "Merge" -- author trims, Fri, 11 Jul 2008 01:14:44 -0700
/*
 * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psParallelCompact.cpp.incl"

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2ChunkSize  = 9; // 512 words
const size_t ParallelCompactData::ChunkSize      = (size_t)1 << Log2ChunkSize;
const size_t ParallelCompactData::ChunkSizeBytes = ChunkSize << LogHeapWordSize;
const size_t ParallelCompactData::ChunkSizeOffsetMask = ChunkSize - 1;
const size_t ParallelCompactData::ChunkAddrOffsetMask = ChunkSizeBytes - 1;
const size_t ParallelCompactData::ChunkAddrMask = ~ChunkAddrOffsetMask;
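
// Worked example: with Log2ChunkSize == 9, a chunk is 512 heap words, which
// is 4K bytes when LogHeapWordSize == 3 (64-bit) and 2K bytes when it is 2
// (32-bit).  Anding an address with ChunkAddrMask rounds it down to its
// chunk boundary; ChunkAddrOffsetMask extracts the byte offset within the
// chunk, and ChunkSizeOffsetMask does the same for word offsets.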

// 32-bit: 128 words covers 4 bitmap words
// 64-bit: 128 words covers 2 bitmap words
const size_t ParallelCompactData::Log2BlockSize   = 7; // 128 words
const size_t ParallelCompactData::BlockSize       = (size_t)1 << Log2BlockSize;
const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1;
const size_t ParallelCompactData::BlockMask       = ~BlockOffsetMask;

const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_shift = 27;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::los_mask = ~dc_mask;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift;
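
// A sketch of the encoding implied by the constants above: chunk_sz_t is
// treated as a 32-bit unsigned value whose upper five bits (from
// dc_shift == 27 up) hold the destination count plus claim state, while the
// lower 27 bits (los_mask) hold the live-object size in words.  dc_claimed
// (0x8 << 27) and dc_completed (0xc << 27) are evidently out-of-band count
// values that mark a chunk as claimed by a worker or completely filled.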

#ifdef ASSERT
short ParallelCompactData::BlockData::_cur_phase = 0;
#endif

SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
bool      PSParallelCompact::_print_phases = false;

ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
klassOop            PSParallelCompact::_updated_int_array_klass_obj = NULL;

double PSParallelCompact::_dwl_mean;
double PSParallelCompact::_dwl_std_dev;
double PSParallelCompact::_dwl_first_term;
double PSParallelCompact::_dwl_adjustment;
#ifdef ASSERT
bool   PSParallelCompact::_dwl_initialized = false;
#endif  // #ifdef ASSERT

#ifdef VALIDATE_MARK_SWEEP
GrowableArray<void*>*  PSParallelCompact::_root_refs_stack = NULL;
GrowableArray<oop>*    PSParallelCompact::_live_oops = NULL;
GrowableArray<oop>*    PSParallelCompact::_live_oops_moved_to = NULL;
GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
size_t                 PSParallelCompact::_live_oops_index = 0;
size_t                 PSParallelCompact::_live_oops_index_at_perm = 0;
GrowableArray<void*>*  PSParallelCompact::_other_refs_stack = NULL;
GrowableArray<void*>*  PSParallelCompact::_adjusted_pointers = NULL;
bool                   PSParallelCompact::_pointer_tracking = false;
bool                   PSParallelCompact::_root_tracking = true;

GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
GrowableArray<size_t>*    PSParallelCompact::_cur_gc_live_oops_size = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
GrowableArray<size_t>*    PSParallelCompact::_last_gc_live_oops_size = NULL;
#endif

// XXX beg - verification code; only works while we also mark in object headers
static void
verify_mark_bitmap(ParMarkBitMap& _mark_bitmap)
{
  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();

  PSPermGen* perm_gen = heap->perm_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* perm_space = perm_gen->object_space();
  MutableSpace* old_space = old_gen->object_space();
  MutableSpace* eden_space = young_gen->eden_space();
  MutableSpace* from_space = young_gen->from_space();
  MutableSpace* to_space = young_gen->to_space();

  // 'from_space' here is the survivor space at the lower address.
  if (to_space->bottom() < from_space->bottom()) {
    from_space = to_space;
    to_space = young_gen->from_space();
  }

  HeapWord* boundaries[12];
  unsigned int bidx = 0;
  const unsigned int bidx_max = sizeof(boundaries) / sizeof(boundaries[0]);

  boundaries[0] = perm_space->bottom();
  boundaries[1] = perm_space->top();
  boundaries[2] = old_space->bottom();
  boundaries[3] = old_space->top();
  boundaries[4] = eden_space->bottom();
  boundaries[5] = eden_space->top();
  boundaries[6] = from_space->bottom();
  boundaries[7] = from_space->top();
  boundaries[8] = to_space->bottom();
  boundaries[9] = to_space->top();
  boundaries[10] = to_space->end();
  boundaries[11] = to_space->end();
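  // boundaries[] holds (bottom, top) pairs for each space, in address order;
  // the two trailing copies of to_space->end() pad the array so that the
  // boundaries[bidx + 1] and boundaries[bidx + 2] lookahead in the loop
  // below stays within the filled entries for the last space.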

  BitMap::idx_t beg_bit = 0;
  BitMap::idx_t end_bit;
  BitMap::idx_t tmp_bit;
  const BitMap::idx_t last_bit = _mark_bitmap.size();
  do {
    HeapWord* addr = _mark_bitmap.bit_to_addr(beg_bit);
    if (_mark_bitmap.is_marked(beg_bit)) {
      oop obj = (oop)addr;
      assert(obj->is_gc_marked(), "obj header is not marked");
      end_bit = _mark_bitmap.find_obj_end(beg_bit, last_bit);
      const size_t size = _mark_bitmap.obj_size(beg_bit, end_bit);
      assert(size == (size_t)obj->size(), "end bit wrong?");
      beg_bit = _mark_bitmap.find_obj_beg(beg_bit + 1, last_bit);
      assert(beg_bit > end_bit, "bit set in middle of an obj");
    } else {
      if (addr >= boundaries[bidx] && addr < boundaries[bidx + 1]) {
        // a dead object in the current space.
        oop obj = (oop)addr;
        end_bit = _mark_bitmap.addr_to_bit(addr + obj->size());
        assert(!obj->is_gc_marked(), "obj marked in header, not in bitmap");
        tmp_bit = beg_bit + 1;
        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "beg bit set in unmarked obj");
        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "end bit set in unmarked obj");
      } else if (addr < boundaries[bidx + 2]) {
        // addr is between top in the current space and bottom in the next.
        end_bit = beg_bit + pointer_delta(boundaries[bidx + 2], addr);
        tmp_bit = beg_bit;
        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "beg bit set above top");
        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "end bit set above top");
        bidx += 2;
      } else if (bidx < bidx_max - 2) {
        bidx += 2; // ???
      } else {
        tmp_bit = beg_bit;
        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, last_bit);
        assert(beg_bit == last_bit, "beg bit set outside heap");
        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, last_bit);
        assert(beg_bit == last_bit, "end bit set outside heap");
      }
    }
  } while (beg_bit < last_bit);
}
// XXX end - verification code; only works while we also mark in object headers

#ifndef PRODUCT
const char* PSParallelCompact::space_names[] = {
  "perm", "old ", "eden", "from", "to  "
};

void PSParallelCompact::print_chunk_ranges()
{
  tty->print_cr("space  bottom     top        end        new_top");
  tty->print_cr("------ ---------- ---------- ---------- ----------");

  for (unsigned int id = 0; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    tty->print_cr("%u %s "
                  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " "
                  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ",
                  id, space_names[id],
                  summary_data().addr_to_chunk_idx(space->bottom()),
                  summary_data().addr_to_chunk_idx(space->top()),
                  summary_data().addr_to_chunk_idx(space->end()),
                  summary_data().addr_to_chunk_idx(_space_info[id].new_top()));
  }
}

void
print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
{
#define CHUNK_IDX_FORMAT  SIZE_FORMAT_W("7")
#define CHUNK_DATA_FORMAT SIZE_FORMAT_W("5")

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;
  tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " "
                CHUNK_IDX_FORMAT " " PTR_FORMAT " "
                CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " "
                CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d",
                i, c->data_location(), dci, c->destination(),
                c->partial_obj_size(), c->live_obj_size(),
                c->data_size(), c->source_chunk(), c->destination_count());

#undef CHUNK_IDX_FORMAT
#undef CHUNK_DATA_FORMAT
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           HeapWord* const beg_addr,
                           HeapWord* const end_addr)
{
  size_t total_words = 0;
  size_t i = summary_data.addr_to_chunk_idx(beg_addr);
  const size_t last = summary_data.addr_to_chunk_idx(end_addr);
  HeapWord* pdest = 0;

  while (i <= last) {
    ParallelCompactData::ChunkData* c = summary_data.chunk(i);
    if (c->data_size() != 0 || c->destination() != pdest) {
      print_generic_summary_chunk(i, c);
      total_words += c->data_size();
      pdest = c->destination();
    }
    ++i;
  }

  tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info)
{
  for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
    const MutableSpace* space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(),
                               MAX2(space->top(), space_info[id].new_top()));
  }
}

void
print_initial_summary_chunk(size_t i,
                            const ParallelCompactData::ChunkData* c,
                            bool newline = true)
{
  tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " "
             SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " "
             SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d",
             i, c->destination(),
             c->partial_obj_size(), c->live_obj_size(),
             c->data_size(), c->source_chunk(), c->destination_count());
  if (newline) tty->cr();
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           const MutableSpace* space) {
  if (space->top() == space->bottom()) {
    return;
  }

  const size_t chunk_size = ParallelCompactData::ChunkSize;
  HeapWord* const top_aligned_up = summary_data.chunk_align_up(space->top());
  const size_t end_chunk = summary_data.addr_to_chunk_idx(top_aligned_up);
  const ParallelCompactData::ChunkData* c = summary_data.chunk(end_chunk - 1);
  HeapWord* end_addr = c->destination() + c->data_size();
  const size_t live_in_space = pointer_delta(end_addr, space->bottom());

  // Print (and count) the full chunks at the beginning of the space.
  size_t full_chunk_count = 0;
  size_t i = summary_data.addr_to_chunk_idx(space->bottom());
  while (i < end_chunk && summary_data.chunk(i)->data_size() == chunk_size) {
    print_initial_summary_chunk(i, summary_data.chunk(i));
    ++full_chunk_count;
    ++i;
  }

  size_t live_to_right = live_in_space - full_chunk_count * chunk_size;

  double max_reclaimed_ratio = 0.0;
  size_t max_reclaimed_ratio_chunk = 0;
  size_t max_dead_to_right = 0;
  size_t max_live_to_right = 0;

  // Print the 'reclaimed ratio' for chunks while there is something live in
  // the chunk or to the right of it.  The remaining chunks are empty (and
  // uninteresting), and computing the ratio will result in division by 0.
  while (i < end_chunk && live_to_right > 0) {
    c = summary_data.chunk(i);
    HeapWord* const chunk_addr = summary_data.chunk_to_addr(i);
    const size_t used_to_right = pointer_delta(space->top(), chunk_addr);
    const size_t dead_to_right = used_to_right - live_to_right;
    const double reclaimed_ratio = double(dead_to_right) / live_to_right;

    if (reclaimed_ratio > max_reclaimed_ratio) {
      max_reclaimed_ratio = reclaimed_ratio;
      max_reclaimed_ratio_chunk = i;
      max_dead_to_right = dead_to_right;
      max_live_to_right = live_to_right;
    }

    print_initial_summary_chunk(i, c, false);
    tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"),
                  reclaimed_ratio, dead_to_right, live_to_right);

    live_to_right -= c->data_size();
    ++i;
  }

  // Any remaining chunks are empty.  Print one more if there is one.
  if (i < end_chunk) {
    print_initial_summary_chunk(i, summary_data.chunk(i));
  }

  tty->print_cr("max:  " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " "
                "l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f",
                max_reclaimed_ratio_chunk, max_dead_to_right,
                max_live_to_right, max_reclaimed_ratio);
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info) {
  unsigned int id = PSParallelCompact::perm_space_id;
  const MutableSpace* space;
  do {
    space = space_info[id].space();
    print_initial_summary_data(summary_data, space);
  } while (++id < PSParallelCompact::eden_space_id);

  do {
    space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(), space->top());
  } while (++id < PSParallelCompact::last_space_id);
}
#endif  // #ifndef PRODUCT

#ifdef ASSERT
size_t add_obj_count;
size_t add_obj_size;
size_t mark_bitmap_count;
size_t mark_bitmap_size;
#endif  // #ifdef ASSERT

ParallelCompactData::ParallelCompactData()
{
  _region_start = 0;

  _chunk_vspace = 0;
  _chunk_data = 0;
  _chunk_count = 0;

  _block_vspace = 0;
  _block_data = 0;
  _block_count = 0;
}

bool ParallelCompactData::initialize(MemRegion covered_region)
{
  _region_start = covered_region.start();
  const size_t region_size = covered_region.word_size();
  DEBUG_ONLY(_region_end = _region_start + region_size;)

  assert(chunk_align_down(_region_start) == _region_start,
         "region start not aligned");
  assert((region_size & ChunkSizeOffsetMask) == 0,
         "region size not a multiple of ChunkSize");

  bool result = initialize_chunk_data(region_size);

  // Initialize the block data if it will be used for updating pointers, or if
  // this is a debug build.
  if (!UseParallelOldGCChunkPointerCalc || trueInDebug) {
    result = result && initialize_block_data(region_size);
  }

  return result;
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(bytes, rs_align, rs_align > 0);
  os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
                       rs.size());
  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
  if (vspace != 0) {
    if (vspace->expand_by(bytes)) {
      return vspace;
    }
    delete vspace;
  }

  return 0;
}

bool ParallelCompactData::initialize_chunk_data(size_t region_size)
{
  const size_t count = (region_size + ChunkSizeOffsetMask) >> Log2ChunkSize;
  _chunk_vspace = create_vspace(count, sizeof(ChunkData));
  if (_chunk_vspace != 0) {
    _chunk_data = (ChunkData*)_chunk_vspace->reserved_low_addr();
    _chunk_count = count;
    return true;
  }
  return false;
}

bool ParallelCompactData::initialize_block_data(size_t region_size)
{
  const size_t count = (region_size + BlockOffsetMask) >> Log2BlockSize;
  _block_vspace = create_vspace(count, sizeof(BlockData));
  if (_block_vspace != 0) {
    _block_data = (BlockData*)_block_vspace->reserved_low_addr();
    _block_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear()
{
  if (_block_data) {
    memset(_block_data, 0, _block_vspace->committed_size());
  }
  memset(_chunk_data, 0, _chunk_vspace->committed_size());
}

void ParallelCompactData::clear_range(size_t beg_chunk, size_t end_chunk) {
  assert(beg_chunk <= _chunk_count, "beg_chunk out of range");
  assert(end_chunk <= _chunk_count, "end_chunk out of range");
  assert(ChunkSize % BlockSize == 0, "ChunkSize not a multiple of BlockSize");

  const size_t chunk_cnt = end_chunk - beg_chunk;

  if (_block_data) {
    const size_t blocks_per_chunk = ChunkSize / BlockSize;
    const size_t beg_block = beg_chunk * blocks_per_chunk;
    const size_t block_cnt = chunk_cnt * blocks_per_chunk;
    memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
  }
  memset(_chunk_data + beg_chunk, 0, chunk_cnt * sizeof(ChunkData));
}

HeapWord* ParallelCompactData::partial_obj_end(size_t chunk_idx) const
{
  const ChunkData* cur_cp = chunk(chunk_idx);
  const ChunkData* const end_cp = chunk(chunk_count() - 1);

  HeapWord* result = chunk_to_addr(chunk_idx);
  if (cur_cp < end_cp) {
    do {
      result += cur_cp->partial_obj_size();
    } while (cur_cp->partial_obj_size() == ChunkSize && ++cur_cp < end_cp);
  }
  return result;
}

void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
{
  const size_t obj_ofs = pointer_delta(addr, _region_start);
  const size_t beg_chunk = obj_ofs >> Log2ChunkSize;
  const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize;

  DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
  DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)

  if (beg_chunk == end_chunk) {
    // All in one chunk.
    _chunk_data[beg_chunk].add_live_obj(len);
    return;
  }

  // First chunk.
  const size_t beg_ofs = chunk_offset(addr);
  _chunk_data[beg_chunk].add_live_obj(ChunkSize - beg_ofs);

  klassOop klass = ((oop)addr)->klass();
  // Middle chunks--completely spanned by this object.
  for (size_t chunk = beg_chunk + 1; chunk < end_chunk; ++chunk) {
    _chunk_data[chunk].set_partial_obj_size(ChunkSize);
    _chunk_data[chunk].set_partial_obj_addr(addr);
  }

  // Last chunk.
  const size_t end_ofs = chunk_offset(addr + len - 1);
  _chunk_data[end_chunk].set_partial_obj_size(end_ofs + 1);
  _chunk_data[end_chunk].set_partial_obj_addr(addr);
}
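
// Worked example (ChunkSize == 512): an object of 1200 words starting at
// offset 100 in chunk k adds 412 live words to chunk k, marks chunk k + 1 as
// completely spanned (partial_obj_size == ChunkSize), and records the
// trailing 276 words in chunk k + 2 via set_partial_obj_size(end_ofs + 1).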

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(chunk_offset(beg) == 0, "not ChunkSize aligned");
  assert(chunk_offset(end) == 0, "not ChunkSize aligned");

  size_t cur_chunk = addr_to_chunk_idx(beg);
  const size_t end_chunk = addr_to_chunk_idx(end);
  HeapWord* addr = beg;
  while (cur_chunk < end_chunk) {
    _chunk_data[cur_chunk].set_destination(addr);
    _chunk_data[cur_chunk].set_destination_count(0);
    _chunk_data[cur_chunk].set_source_chunk(cur_chunk);
    _chunk_data[cur_chunk].set_data_location(addr);

    // Update live_obj_size so the chunk appears completely full.
    size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size();
    _chunk_data[cur_chunk].set_live_obj_size(live_size);

    ++cur_chunk;
    addr += ChunkSize;
  }
}
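
// After summarize_dense_prefix(), every chunk in [beg, end) is its own
// destination: destination() is the chunk's start address, the destination
// count is 0, and live_obj_size is padded so that data_size() == ChunkSize.
// The chunks thus appear completely full, so their data stays in place.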

bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** target_next,
                                    HeapWord** source_next) {
  // This is too strict.
  // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned");

  if (TraceParallelOldGCSummaryPhase) {
    tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
                  "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
                  "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
                  target_beg, target_end,
                  source_beg, source_end,
                  target_next != 0 ? *target_next : (HeapWord*) 0,
                  source_next != 0 ? *source_next : (HeapWord*) 0);
  }

  size_t cur_chunk = addr_to_chunk_idx(source_beg);
  const size_t end_chunk = addr_to_chunk_idx(chunk_align_up(source_end));

  HeapWord* dest_addr = target_beg;
  while (cur_chunk < end_chunk) {
    size_t words = _chunk_data[cur_chunk].data_size();

#if 1
    assert(pointer_delta(target_end, dest_addr) >= words,
           "source region does not fit into target region");
#else
    // XXX - need some work on the corner cases here.  If the chunk does not
    // fit, then must either make sure any partial_obj from the chunk fits, or
    // 'undo' the initial part of the partial_obj that is in the previous
    // chunk.
    if (dest_addr + words >= target_end) {
      // Let the caller know where to continue.
      *target_next = dest_addr;
      *source_next = chunk_to_addr(cur_chunk);
      return false;
    }
#endif  // #if 1

    _chunk_data[cur_chunk].set_destination(dest_addr);

    // Set the destination_count for cur_chunk, and if necessary, update
    // source_chunk for a destination chunk.  The source_chunk field is
    // updated if cur_chunk is the first (left-most) chunk to be copied to a
    // destination chunk.
    //
    // The destination_count calculation is a bit subtle.  A chunk that has
    // data that compacts into itself does not count itself as a destination.
    // This maintains the invariant that a zero count means the chunk is
    // available and can be claimed and then filled.
    if (words > 0) {
      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr);
      const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr);
#if 0
      // Initially assume that the destination chunks will be the same and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_chunk == dest_chunk_2, then cur_chunk will be compacted
      // completely into itself.
      uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1;
      if (dest_chunk_1 != dest_chunk_2) {
        // Destination chunks differ; adjust destination_count.
        destination_count += 1;
        // Data from cur_chunk will be copied to the start of dest_chunk_2.
        _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
      } else if (chunk_offset(dest_addr) == 0) {
        // Data from cur_chunk will be copied to the start of the destination
        // chunk.
        _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
      }
#else
      // Initially assume that the destination chunks will be different and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_chunk == dest_chunk_2, then cur_chunk will be compacted partially
      // into dest_chunk_1 and partially into itself.
      uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2;
      if (dest_chunk_1 != dest_chunk_2) {
        // Data from cur_chunk will be copied to the start of dest_chunk_2.
        _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
      } else {
        // Destination chunks are the same; adjust destination_count.
        destination_count -= 1;
        if (chunk_offset(dest_addr) == 0) {
          // Data from cur_chunk will be copied to the start of the
          // destination chunk.
          _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
        }
      }
#endif  // #if 0

      _chunk_data[cur_chunk].set_destination_count(destination_count);
      _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk));
      dest_addr += words;
    }

    ++cur_chunk;
  }

  *target_next = dest_addr;
  return true;
}
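
// To make the destination_count cases above concrete: if a chunk's live data
// lands entirely within one destination chunk d, the count is 1, or 0 when d
// is the chunk itself (it compacts into place); if the data straddles two
// destination chunks, the count is 2, or 1 when the second of them is the
// chunk itself.  A count of zero is what later marks a chunk as immediately
// fillable.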

bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) {
  HeapWord* block_addr = block_to_addr(block_index);
  HeapWord* block_end_addr = block_addr + BlockSize;
  size_t chunk_index = addr_to_chunk_idx(block_addr);
  HeapWord* partial_obj_end_addr = partial_obj_end(chunk_index);

  // An object that ends at the end of the block ends in the block (the last
  // word of the object is to the left of the end).
  if ((block_addr < partial_obj_end_addr) &&
      (partial_obj_end_addr <= block_end_addr)) {
    return true;
  }

  return false;
}

HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
  HeapWord* result = NULL;
  if (UseParallelOldGCChunkPointerCalc) {
    result = chunk_calc_new_pointer(addr);
  } else {
    result = block_calc_new_pointer(addr);
  }
  return result;
}

// This method is too complicated (expensive) to be called for every
// reference.  Try to restructure it so that NULL is returned if the object
// is dead.  But don't waste the cycles to explicitly check that it is dead,
// since only live objects should be passed in.

HeapWord* ParallelCompactData::chunk_calc_new_pointer(HeapWord* addr) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
#ifdef ASSERT
  if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
    gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
  }
#endif
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");

  // Chunk covering the object.
  size_t chunk_index = addr_to_chunk_idx(addr);
  const ChunkData* const chunk_ptr = chunk(chunk_index);
  HeapWord* const chunk_addr = chunk_align_down(addr);

  assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
  assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");

  HeapWord* result = chunk_ptr->destination();

  // If all the data in the chunk is live, then the new location of the object
  // can be calculated from the destination of the chunk plus the offset of
  // the object in the chunk.
  if (chunk_ptr->data_size() == ChunkSize) {
    result += pointer_delta(addr, chunk_addr);
    return result;
  }

  // The new location of the object is
  //    chunk destination +
  //    size of the partial object extending onto the chunk +
  //    sizes of the live objects in the Chunk that are to the left of addr
  const size_t partial_obj_size = chunk_ptr->partial_obj_size();
  HeapWord* const search_start = chunk_addr + partial_obj_size;

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));

  result += partial_obj_size + live_to_left;
  assert(result <= addr, "object cannot move to the right");
  return result;
}
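
// Illustration of the partial-object case above: for an object whose chunk
// has destination D, a 40-word partial object extending onto the chunk, and
// 100 live words between the end of that partial object and the object
// itself, the returned address is D + 40 + 100.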

HeapWord* ParallelCompactData::block_calc_new_pointer(HeapWord* addr) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
#ifdef ASSERT
  if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
    gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
  }
#endif
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");

  // Chunk covering the object.
  size_t chunk_index = addr_to_chunk_idx(addr);
  const ChunkData* const chunk_ptr = chunk(chunk_index);
  HeapWord* const chunk_addr = chunk_align_down(addr);

  assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
  assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");

  HeapWord* result = chunk_ptr->destination();

  // If all the data in the chunk is live, then the new location of the object
  // can be calculated from the destination of the chunk plus the offset of
  // the object in the chunk.
  if (chunk_ptr->data_size() == ChunkSize) {
    result += pointer_delta(addr, chunk_addr);
    return result;
  }

  // The new location of the object is
  //    chunk destination +
  //    block offset +
  //    sizes of the live objects in the Block that are to the left of addr
  const size_t block_offset = addr_to_block_ptr(addr)->offset();
  HeapWord* const search_start = chunk_addr + block_offset;

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));

  result += block_offset + live_to_left;
  assert(result <= addr, "object cannot move to the right");
  assert(result == chunk_calc_new_pointer(addr), "Should match");
  return result;
}

klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
  klassOop updated_klass;
  if (PSParallelCompact::should_update_klass(old_klass)) {
    updated_klass = (klassOop) calc_new_pointer(old_klass);
  } else {
    updated_klass = old_klass;
  }

  return updated_klass;
}

#ifdef ASSERT
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
{
  const size_t* const beg = (const size_t*)vspace->committed_low_addr();
  const size_t* const end = (const size_t*)vspace->committed_high_addr();
  for (const size_t* p = beg; p < end; ++p) {
    assert(*p == 0, "not zero");
  }
}

void ParallelCompactData::verify_clear()
{
  verify_clear(_chunk_vspace);
  verify_clear(_block_vspace);
}
#endif  // #ifdef ASSERT

#ifdef NOT_PRODUCT
ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) {
  ParallelCompactData& sd = PSParallelCompact::summary_data();
  return sd.chunk(chunk_index);
}
#endif

elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_total_invocations = 0;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
jlong               PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters*  PSParallelCompact::_counters = NULL;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

void PSParallelCompact::IsAliveClosure::do_object(oop p)   { ShouldNotReachHere(); }
bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }

PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);

void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }

void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }

void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(_compaction_manager, p); }
void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = gc_heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  MemRegion mr = heap->reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                         // span
    true,                       // atomic_discovery
    true,                       // mt_discovery
    &_is_alive_closure,
    ParallelGCThreads,
    ParallelRefProcEnabled);
  _counters = new CollectorCounters("PSParallelCompact", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize() {
  ParallelScavengeHeap* heap = gc_heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MemRegion mr = heap->reserved_region();

  // Was the old gen allocated successfully?
  if (!heap->old_gen()->is_allocated()) {
    return false;
  }

  initialize_space_info();
  initialize_dead_wood_limiter();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization("Unable to allocate bit map for "
      "parallel garbage collection for the requested heap size.");
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization("Unable to allocate tables for "
      "parallel garbage collection for the requested heap size.");
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = gc_heap();
  PSYoungGen* young_gen = heap->young_gen();
  MutableSpace* perm_space = heap->perm_gen()->object_space();

  _space_info[perm_space_id].set_space(perm_space);
  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());

  _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
                  _space_info[perm_space_id].min_dense_prefix());
  }
}

void PSParallelCompact::initialize_dead_wood_limiter()
{
  const size_t max = 100;
  _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
  _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
  _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
  DEBUG_ONLY(_dwl_initialized = true;)
  _dwl_adjustment = normal_distribution(1.0);
}
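
// The limiter evaluates the normal density
//   f(x) = 1/(sqrt(2*pi) * sigma) * exp(-(x - mean)^2 / (2 * sigma^2))
// with mean and sigma taken from ParallelOldDeadWoodLimiterMean and
// ParallelOldDeadWoodLimiterStdDev (both given as percentages).
// _dwl_first_term caches the 1/(sqrt(2*pi) * sigma) factor and
// _dwl_adjustment is f(1.0), which dead_wood_limiter() below subtracts so
// the curve bottoms out at min_percent when the density is 1.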

// Simple class for storing info about the heap at the start of GC, to be used
// after GC for comparison/printing.
class PreGCValues {
 public:
  PreGCValues() { }
  PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }

  void fill(ParallelScavengeHeap* heap) {
    _heap_used      = heap->used();
    _young_gen_used = heap->young_gen()->used_in_bytes();
    _old_gen_used   = heap->old_gen()->used_in_bytes();
    _perm_gen_used  = heap->perm_gen()->used_in_bytes();
  }

  size_t heap_used() const      { return _heap_used; }
  size_t young_gen_used() const { return _young_gen_used; }
  size_t old_gen_used() const   { return _old_gen_used; }
  size_t perm_gen_used() const  { return _perm_gen_used; }

 private:
  size_t _heap_used;
  size_t _young_gen_used;
  size_t _old_gen_used;
  size_t _perm_gen_used;
};

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that
  // will be set at the end of GC.  The marking bitmap is cleared to top;
  // nothing should be marked above top.  The summary data is cleared to the
  // larger of top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
  const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
  _mark_bitmap.clear_range(beg_bit, end_bit);

  const size_t beg_chunk = _summary_data.addr_to_chunk_idx(bot);
  const size_t end_chunk =
    _summary_data.addr_to_chunk_idx(_summary_data.chunk_align_up(max_top));
  _summary_data.clear_range(beg_chunk, end_chunk);
}

void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of
  // minor collections will have swapped the spaces an unknown number of
  // times.
  TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
  ParallelScavengeHeap* heap = gc_heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  pre_gc_values->fill(heap);

  ParCompactionManager::reset();
  NOT_PRODUCT(_mark_bitmap.reset_counters());
  DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
  DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)

  // Increment the invocation count
  heap->increment_total_collections(true);

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    heap->old_gen()->verify_object_start_array();
    heap->perm_gen()->verify_object_start_array();
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)

  // Have worker threads release resources the next time they run a task.
  gc_task_manager()->release_all_resources();
}

void PSParallelCompact::post_compact()
{
  TraceTime tm("post compact", print_phases(), true, gclog_or_tty);

  // Clear the marking bitmap and summary data and update top() in each space.
  for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
    clear_data_covering_space(SpaceId(id));
    _space_info[id].space()->set_top(_space_info[id].new_top());
  }

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = gc_heap();
  bool eden_empty = eden_space->is_empty();
  if (!eden_empty) {
    eden_empty = absorb_live_data_from_eden(heap->size_policy(),
                                            heap->young_gen(), heap->old_gen());
  }

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  BarrierSet* bs = heap->barrier_set();
  if (bs->is_a(BarrierSet::ModRef)) {
    ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
    MemRegion old_mr = heap->old_gen()->reserved();
    MemRegion perm_mr = heap->perm_gen()->reserved();
    assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

    if (young_gen_empty) {
      modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
    }
  }

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  ref_processor()->enqueue_discovered_references(NULL);

  // Update time of last GC
  reset_millis_since_last_gc();
}

HeapWord*
PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction)
{
  const size_t chunk_size = ParallelCompactData::ChunkSize;
  const ParallelCompactData& sd = summary_data();

  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
  const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(space->bottom());
  const ChunkData* const end_cp = sd.addr_to_chunk_ptr(top_aligned_up);

  // Skip full chunks at the beginning of the space--they are necessarily part
  // of the dense prefix.
  size_t full_count = 0;
  const ChunkData* cp;
  for (cp = beg_cp; cp < end_cp && cp->data_size() == chunk_size; ++cp) {
    ++full_count;
  }

  assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
  const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
  if (maximum_compaction || cp == end_cp || interval_ended) {
    _maximum_compaction_gc_num = total_invocations();
    return sd.chunk_to_addr(cp);
  }

  HeapWord* const new_top = _space_info[id].new_top();
  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t space_used = space->used_in_words();
  const size_t space_capacity = space->capacity_in_words();

  const double cur_density = double(space_live) / space_capacity;
  const double deadwood_density =
    (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
  const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
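
  // Note that the deadwood density (1 - d)^2 * d^2 peaks at d == 0.5 and
  // falls to zero at d == 0 and d == 1: a half-full space tolerates the most
  // dead wood in its dense prefix, while nearly empty or nearly full spaces
  // tolerate almost none.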

  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
                  cur_density, deadwood_density, deadwood_goal);
    tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
                  "space_cap=" SIZE_FORMAT,
                  space_live, space_used,
                  space_capacity);
  }

  // XXX - Use binary search?
  HeapWord* dense_prefix = sd.chunk_to_addr(cp);
  const ChunkData* full_cp = cp;
  const ChunkData* const top_cp = sd.addr_to_chunk_ptr(space->top() - 1);
  while (cp < end_cp) {
    HeapWord* chunk_destination = cp->destination();
    const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
    if (TraceParallelOldGCDensePrefix && Verbose) {
      tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " "
                    "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"),
                    sd.chunk(cp), chunk_destination,
                    dense_prefix, cur_deadwood);
    }

    if (cur_deadwood >= deadwood_goal) {
      // Found the chunk that has the correct amount of deadwood to the left.
      // This typically occurs after crossing a fairly sparse set of chunks,
      // so iterate backwards over those sparse chunks, looking for the chunk
      // that has the lowest density of live objects 'to the right.'
      size_t space_to_left = sd.chunk(cp) * chunk_size;
      size_t live_to_left = space_to_left - cur_deadwood;
      size_t space_to_right = space_capacity - space_to_left;
      size_t live_to_right = space_live - live_to_left;
      double density_to_right = double(live_to_right) / space_to_right;
      while (cp > full_cp) {
        --cp;
        const size_t prev_chunk_live_to_right = live_to_right - cp->data_size();
        const size_t prev_chunk_space_to_right = space_to_right + chunk_size;
        double prev_chunk_density_to_right =
          double(prev_chunk_live_to_right) / prev_chunk_space_to_right;
        if (density_to_right <= prev_chunk_density_to_right) {
          return dense_prefix;
        }
        if (TraceParallelOldGCDensePrefix && Verbose) {
          tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f "
                        "pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
                        prev_chunk_density_to_right);
        }
        dense_prefix -= chunk_size;
        live_to_right = prev_chunk_live_to_right;
        space_to_right = prev_chunk_space_to_right;
        density_to_right = prev_chunk_density_to_right;
      }
      return dense_prefix;
    }

    dense_prefix += chunk_size;
    ++cp;
  }

  return dense_prefix;
}

#ifndef PRODUCT
void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
                                                 const SpaceId id,
                                                 const bool maximum_compaction,
                                                 HeapWord* const addr)
{
  const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr);
  ChunkData* const cp = summary_data().chunk(chunk_idx);
  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const new_top = _space_info[id].new_top();

  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t dead_to_left = pointer_delta(addr, cp->destination());
  const size_t space_cap = space->capacity_in_words();
  const double dead_to_left_pct = double(dead_to_left) / space_cap;
  const size_t live_to_right = new_top - cp->destination();
  const size_t dead_to_right = space->top() - addr - live_to_right;

  tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " "
                "spl=" SIZE_FORMAT " "
                "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
                "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
                " ratio=%10.8f",
                algorithm, addr, chunk_idx,
                space_live,
                dead_to_left, dead_to_left_pct,
                dead_to_right, live_to_right,
                double(dead_to_right) / live_to_right);
}
#endif  // #ifndef PRODUCT

// Return a fraction indicating how much of the generation can be treated as
// "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
// based on the density of live objects in the generation to determine a
// limit, which is then adjusted so the return value is min_percent when the
// density is 1.
//
// The following table shows some return values for different values of the
// standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
// min_percent is 1.
//
//                          fraction allowed as dead wood
//         -----------------------------------------------------------------
// density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
// ------- ---------- ---------- ---------- ---------- ---------- ----------
// 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
// 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
// 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000

double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
{
  assert(_dwl_initialized, "uninitialized");

  // The raw limit is the value of the normal distribution at x = density.
  const double raw_limit = normal_distribution(density);

  // Adjust the raw limit so it becomes the minimum when the density is 1.
  //
  // First subtract the adjustment value (which is simply the precomputed
  // value normal_distribution(1.0)); this yields a value of 0 when the
  // density is 1.  Then add the minimum value, so the minimum is returned
  // when the density is 1.  Finally, prevent negative values, which occur
  // when the mean is not 0.5.
  const double min = double(min_percent) / 100.0;
  const double limit = raw_limit - _dwl_adjustment + min;
  return MAX2(limit, 0.0);
}
1249 | |
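// The sketch below is an editor's illustration, not VM code: a minimal
// standalone program that reproduces the table above, assuming that
// normal_distribution() is the Gaussian density with mean
// ParallelOldDeadWoodLimiterMean / 100.0 and the stated standard deviations,
// and that _dwl_adjustment caches normal_distribution(1.0). With std_dev
// 0.70 it prints 0.13832410 for density 0.5, matching the first column.
#if 0
#include <math.h>
#include <stdio.h>

static double gauss(double x, double mean, double std_dev) {
  const double first_term = 1.0 / (sqrt(2.0 * M_PI) * std_dev);
  const double z = (x - mean) / std_dev;
  return first_term * exp(-0.5 * z * z);
}

static double limiter(double density, double std_dev, int min_percent) {
  const double raw_limit = gauss(density, 0.5, std_dev);  // normal_distribution(density)
  const double adjustment = gauss(1.0, 0.5, std_dev);     // _dwl_adjustment
  const double min = (double)min_percent / 100.0;
  const double limit = raw_limit - adjustment + min;      // equals min when density == 1
  return limit > 0.0 ? limit : 0.0;                       // clamp negative values
}

int main(void) {
  for (int i = 0; i <= 20; ++i) {                         // densities 0.00 .. 1.00
    const double d = i * 0.05;
    printf("%7.5f %10.8f\n", d, limiter(d, 0.70, 1));
  }
  return 0;
}
#endif // #if 0
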
1250 ParallelCompactData::ChunkData* | |
1251 PSParallelCompact::first_dead_space_chunk(const ChunkData* beg, | |
1252 const ChunkData* end) | |
1253 { | |
1254 const size_t chunk_size = ParallelCompactData::ChunkSize; | |
1255 ParallelCompactData& sd = summary_data(); | |
1256 size_t left = sd.chunk(beg); | |
1257 size_t right = end > beg ? sd.chunk(end) - 1 : left; | |
1258 | |
1259 // Binary search. | |
1260 while (left < right) { | |
1261 // Equivalent to (left + right) / 2, but does not overflow. | |
1262 const size_t middle = left + (right - left) / 2; | |
1263 ChunkData* const middle_ptr = sd.chunk(middle); | |
1264 HeapWord* const dest = middle_ptr->destination(); | |
1265 HeapWord* const addr = sd.chunk_to_addr(middle); | |
1266 assert(dest != NULL, "sanity"); | |
1267 assert(dest <= addr, "must move left"); | |
1268 | |
1269 if (middle > left && dest < addr) { | |
1270 right = middle - 1; | |
1271 } else if (middle < right && middle_ptr->data_size() == chunk_size) { | |
1272 left = middle + 1; | |
1273 } else { | |
1274 return middle_ptr; | |
1275 } | |
1276 } | |
1277 return sd.chunk(left); | |
1278 } | |
1279 | |
1280 ParallelCompactData::ChunkData* | |
1281 PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg, | |
1282 const ChunkData* end, | |
1283 size_t dead_words) | |
1284 { | |
1285 ParallelCompactData& sd = summary_data(); | |
1286 size_t left = sd.chunk(beg); | |
1287 size_t right = end > beg ? sd.chunk(end) - 1 : left; | |
1288 | |
1289 // Binary search. | |
1290 while (left < right) { | |
1291 // Equivalent to (left + right) / 2, but does not overflow. | |
1292 const size_t middle = left + (right - left) / 2; | |
1293 ChunkData* const middle_ptr = sd.chunk(middle); | |
1294 HeapWord* const dest = middle_ptr->destination(); | |
1295 HeapWord* const addr = sd.chunk_to_addr(middle); | |
1296 assert(dest != NULL, "sanity"); | |
1297 assert(dest <= addr, "must move left"); | |
1298 | |
1299 const size_t dead_to_left = pointer_delta(addr, dest); | |
1300 if (middle > left && dead_to_left > dead_words) { | |
1301 right = middle - 1; | |
1302 } else if (middle < right && dead_to_left < dead_words) { | |
1303 left = middle + 1; | |
1304 } else { | |
1305 return middle_ptr; | |
1306 } | |
1307 } | |
1308 return sd.chunk(left); | |
1309 } | |
1310 | |
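// Editor's sketch of the two searches above, recast over a plain array: since
// dead_to_left (addr - destination) never decreases from left to right, the
// left/right narrowing converges on a chunk whose dead space to the left is
// approximately `target`. All names here are illustrative only.
#if 0
#include <stddef.h>

static size_t find_limit(const size_t* dead_to_left, size_t left, size_t right,
                         size_t target) {
  while (left < right) {
    const size_t middle = left + (right - left) / 2;  // no overflow
    if (middle > left && dead_to_left[middle] > target) {
      right = middle - 1;                             // too much dead space; go left
    } else if (middle < right && dead_to_left[middle] < target) {
      left = middle + 1;                              // too little; go right
    } else {
      return middle;                                  // close enough, or range exhausted
    }
  }
  return left;
}
#endif // #if 0
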
1311 // The result is valid during the summary phase, after the initial summarization | |
1312 // of each space into itself, and before final summarization. | |
1313 inline double | |
1314 PSParallelCompact::reclaimed_ratio(const ChunkData* const cp, | |
1315 HeapWord* const bottom, | |
1316 HeapWord* const top, | |
1317 HeapWord* const new_top) | |
1318 { | |
1319 ParallelCompactData& sd = summary_data(); | |
1320 | |
1321 assert(cp != NULL, "sanity"); | |
1322 assert(bottom != NULL, "sanity"); | |
1323 assert(top != NULL, "sanity"); | |
1324 assert(new_top != NULL, "sanity"); | |
1325 assert(top >= new_top, "summary data problem?"); | |
1326 assert(new_top > bottom, "space is empty; should not be here"); | |
1327 assert(new_top >= cp->destination(), "sanity"); | |
1328 assert(top >= sd.chunk_to_addr(cp), "sanity"); | |
1329 | |
1330 HeapWord* const destination = cp->destination(); | |
1331 const size_t dense_prefix_live = pointer_delta(destination, bottom); | |
1332 const size_t compacted_region_live = pointer_delta(new_top, destination); | |
1333 const size_t compacted_region_used = pointer_delta(top, sd.chunk_to_addr(cp)); | |
1334 const size_t reclaimable = compacted_region_used - compacted_region_live; | |
1335 | |
1336 const double divisor = dense_prefix_live + 1.25 * compacted_region_live; | |
1337 return double(reclaimable) / divisor; | |
1338 } | |
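
// Worked example with illustrative numbers: if dense_prefix_live is 1000
// words, compacted_region_live is 500 and compacted_region_used is 800, then
// reclaimable is 300 and the divisor is 1000 + 1.25 * 500 = 1625, for a
// ratio of about 0.185. The 1.25 weight appears to bias the choice toward
// prefixes that leave less live data to be copied.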
1339 | |
1340 // Return the address of the end of the dense prefix, a.k.a. the start of the | |
1341 // compacted region. The address is always on a chunk boundary. | |
1342 // | |
1343 // Completely full chunks at the left are skipped, since no compaction can occur | |
1344 // in those chunks. Then the maximum amount of dead wood to allow is computed, | |
1345 // based on the density (amount live / capacity) of the generation; the chunk | |
1346 // with approximately that amount of dead space to the left is identified as the | |
1347 // limit chunk. Chunks between the last completely full chunk and the limit | |
1348 // chunk are scanned and the one that has the best (maximum) reclaimed_ratio() | |
1349 // is selected. | |
1350 HeapWord* | |
1351 PSParallelCompact::compute_dense_prefix(const SpaceId id, | |
1352 bool maximum_compaction) | |
1353 { | |
1354 const size_t chunk_size = ParallelCompactData::ChunkSize; | |
1355 const ParallelCompactData& sd = summary_data(); | |
1356 | |
1357 const MutableSpace* const space = _space_info[id].space(); | |
1358 HeapWord* const top = space->top(); | |
1359 HeapWord* const top_aligned_up = sd.chunk_align_up(top); | |
1360 HeapWord* const new_top = _space_info[id].new_top(); | |
1361 HeapWord* const new_top_aligned_up = sd.chunk_align_up(new_top); | |
1362 HeapWord* const bottom = space->bottom(); | |
1363 const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(bottom); | |
1364 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up); | |
1365 const ChunkData* const new_top_cp = sd.addr_to_chunk_ptr(new_top_aligned_up); | |
1366 | |
1367 // Skip full chunks at the beginning of the space--they are necessarily part | |
1368 // of the dense prefix. | |
1369 const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp); | |
1370 assert(full_cp->destination() == sd.chunk_to_addr(full_cp) || | |
1371 space->is_empty(), "no dead space allowed to the left"); | |
1372 assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1, | |
1373 "chunk must have dead space"); | |
1374 | |
1375 // The gc number is saved whenever a maximum compaction is done, and used to | |
1376 // determine when the maximum compaction interval has expired. This avoids | |
1377 // successive max compactions for different reasons. | |
1378 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity"); | |
1379 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num; | |
1380 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval || | |
1381 total_invocations() == HeapFirstMaximumCompactionCount; | |
1382 if (maximum_compaction || full_cp == top_cp || interval_ended) { | |
1383 _maximum_compaction_gc_num = total_invocations(); | |
1384 return sd.chunk_to_addr(full_cp); | |
1385 } | |
1386 | |
1387 const size_t space_live = pointer_delta(new_top, bottom); | |
1388 const size_t space_used = space->used_in_words(); | |
1389 const size_t space_capacity = space->capacity_in_words(); | |
1390 | |
1391 const double density = double(space_live) / double(space_capacity); | |
1392 const size_t min_percent_free = | |
1393 id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio; | |
1394 const double limiter = dead_wood_limiter(density, min_percent_free); | |
1395 const size_t dead_wood_max = space_used - space_live; | |
1396 const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter), | |
1397 dead_wood_max); | |
1398 | |
1399 if (TraceParallelOldGCDensePrefix) { | |
1400 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " " | |
1401 "space_cap=" SIZE_FORMAT, | |
1402 space_live, space_used, | |
1403 space_capacity); | |
1404 tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f " | |
1405 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT, | |
1406 density, min_percent_free, limiter, | |
1407 dead_wood_max, dead_wood_limit); | |
1408 } | |
1409 | |
1410 // Locate the chunk with the desired amount of dead space to the left. | |
1411 const ChunkData* const limit_cp = | |
1412 dead_wood_limit_chunk(full_cp, top_cp, dead_wood_limit); | |
1413 | |
1414 // Scan from the first chunk with dead space to the limit chunk and find the | |
1415 // one with the best (largest) reclaimed ratio. | |
1416 double best_ratio = 0.0; | |
1417 const ChunkData* best_cp = full_cp; | |
1418 for (const ChunkData* cp = full_cp; cp < limit_cp; ++cp) { | |
1419 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top); | |
1420 if (tmp_ratio > best_ratio) { | |
1421 best_cp = cp; | |
1422 best_ratio = tmp_ratio; | |
1423 } | |
1424 } | |
1425 | |
1426 #if 0 | |
1427 // Something to consider: if the chunk with the best ratio is 'close to' the | |
1428 // first chunk w/free space, choose the first chunk with free space | |
1429 // ("first-free"). The first-free chunk is usually near the start of the | |
1430 // heap, which means we are copying most of the heap already, so copy a bit | |
1431 // more to get complete compaction. | |
1432 if (pointer_delta(best_cp, full_cp, sizeof(ChunkData)) < 4) { | |
1433 _maximum_compaction_gc_num = total_invocations(); | |
1434 best_cp = full_cp; | |
1435 } | |
1436 #endif // #if 0 | |
1437 | |
1438 return sd.chunk_to_addr(best_cp); | |
1439 } | |
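
// Worked example with illustrative numbers: at density 0.5 with std_dev 0.80
// and MarkSweepDeadRatio == 1, dead_wood_limiter() returns ~0.0985 (see the
// table above), so up to ~9.85% of the space's capacity may be left as dead
// wood in the dense prefix, capped by dead_wood_max (the dead space actually
// present in the space).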
1440 | |
1441 void PSParallelCompact::summarize_spaces_quick() | |
1442 { | |
1443 for (unsigned int i = 0; i < last_space_id; ++i) { | |
1444 const MutableSpace* space = _space_info[i].space(); | |
1445 bool result = _summary_data.summarize(space->bottom(), space->end(), | |
1446 space->bottom(), space->top(), | |
1447 _space_info[i].new_top_addr()); | |
1448 assert(result, "should never fail"); | |
1449 _space_info[i].set_dense_prefix(space->bottom()); | |
1450 } | |
1451 } | |
1452 | |
1453 void PSParallelCompact::fill_dense_prefix_end(SpaceId id) | |
1454 { | |
1455 HeapWord* const dense_prefix_end = dense_prefix(id); | |
1456 const ChunkData* chunk = _summary_data.addr_to_chunk_ptr(dense_prefix_end); | |
1457 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end); | |
1458 if (dead_space_crosses_boundary(chunk, dense_prefix_bit)) { | |
1459 // Only enough dead space is filled so that any remaining dead space to the | |
1460 // left is larger than the minimum filler object. (The remainder is filled | |
1461 // during the copy/update phase.) | |
1462 // | |
1463 // The size of the dead space to the right of the boundary is not a | |
1464 // concern, since compaction will be able to use whatever space is | |
1465 // available. | |
1466 // | |
1467 // Here '||' is the boundary, 'x' represents a don't care bit and a box | |
1468 // surrounds the space to be filled with an object. | |
1469 // | |
1470 // In the 32-bit VM, each bit represents two 32-bit words: | |
1471 // +---+ | |
1472 // a) beg_bits: ... x x x | 0 | || 0 x x ... | |
1473 // end_bits: ... x x x | 0 | || 0 x x ... | |
1474 // +---+ | |
1475 // | |
1476 // In the 64-bit VM, each bit represents one 64-bit word: | |
1477 // +------------+ | |
1478 // b) beg_bits: ... x x x | 0 || 0 | x x ... | |
1479 // end_bits: ... x x 1 | 0 || 0 | x x ... | |
1480 // +------------+ | |
1481 // +-------+ | |
1482 // c) beg_bits: ... x x | 0 0 | || 0 x x ... | |
1483 // end_bits: ... x 1 | 0 0 | || 0 x x ... | |
1484 // +-------+ | |
1485 // +-----------+ | |
1486 // d) beg_bits: ... x | 0 0 0 | || 0 x x ... | |
1487 // end_bits: ... 1 | 0 0 0 | || 0 x x ... | |
1488 // +-----------+ | |
1489 // +-------+ | |
1490 // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ... | |
1491 // end_bits: ... 0 0 | 0 0 | || 0 x x ... | |
1492 // +-------+ | |
1493 | |
1494 // Initially assume case a, c or e will apply. | |
1495 size_t obj_len = (size_t)oopDesc::header_size(); | |
1496 HeapWord* obj_beg = dense_prefix_end - obj_len; | |
1497 | |
1498 #ifdef _LP64 | |
1499 if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) { | |
1500 // Case b above. | |
1501 obj_beg = dense_prefix_end - 1; | |
1502 } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) && | |
1503 _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) { | |
1504 // Case d above. | |
1505 obj_beg = dense_prefix_end - 3; | |
1506 obj_len = 3; | |
1507 } | |
1508 #endif // #ifdef _LP64 | |
1509 | |
1510 MemRegion region(obj_beg, obj_len); | |
1511 SharedHeap::fill_region_with_object(region); | |
1512 _mark_bitmap.mark_obj(obj_beg, obj_len); | |
1513 _summary_data.add_obj(obj_beg, obj_len); | |
1514 assert(start_array(id) != NULL, "sanity"); | |
1515 start_array(id)->allocate_block(obj_beg); | |
1516 } | |
1517 } | |
1518 | |
1519 void | |
1520 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction) | |
1521 { | |
1522 assert(id < last_space_id, "id out of range"); | |
1523 | |
1524 const MutableSpace* space = _space_info[id].space(); | |
1525 HeapWord** new_top_addr = _space_info[id].new_top_addr(); | |
1526 | |
1527 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction); | |
1528 _space_info[id].set_dense_prefix(dense_prefix_end); | |
1529 | |
1530 #ifndef PRODUCT | |
1531 if (TraceParallelOldGCDensePrefix) { | |
1532 print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end); | |
1533 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction); | |
1534 print_dense_prefix_stats("density", id, maximum_compaction, addr); | |
1535 } | |
1536 #endif // #ifndef PRODUCT | |
1537 | |
1538 // If dead space crosses the dense prefix boundary, it is (at least partially) | |
1539 // filled with a dummy object, marked live and added to the summary data. | |
1540 // This simplifies the copy/update phase and must be done before the final | |
1541 // locations of objects are determined, to prevent leaving a fragment of dead | |
1542 // space that is too small to fill with an object. | |
1543 if (!maximum_compaction && dense_prefix_end != space->bottom()) { | |
1544 fill_dense_prefix_end(id); | |
1545 } | |
1546 | |
1547 // Compute the destination of each Chunk, and thus each object. | |
1548 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end); | |
1549 _summary_data.summarize(dense_prefix_end, space->end(), | |
1550 dense_prefix_end, space->top(), | |
1551 new_top_addr); | |
1552 | |
1553 if (TraceParallelOldGCSummaryPhase) { | |
1554 const size_t chunk_size = ParallelCompactData::ChunkSize; | |
1555 const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end); | |
1556 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom()); | |
1557 const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr); | |
1558 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end); | |
1559 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " " | |
1560 "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " " | |
1561 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT, | |
1562 id, space->capacity_in_words(), dense_prefix_end, | |
1563 dp_chunk, dp_words / chunk_size, | |
1564 cr_words / chunk_size, *new_top_addr); | |
1565 } | |
1566 } | |
1567 | |
1568 void PSParallelCompact::summary_phase(ParCompactionManager* cm, | |
1569 bool maximum_compaction) | |
1570 { | |
1571 EventMark m("2 summarize"); | |
1572 TraceTime tm("summary phase", print_phases(), true, gclog_or_tty); | |
1573 // trace("2"); | |
1574 | |
1575 #ifdef ASSERT | |
1576 if (VerifyParallelOldWithMarkSweep && | |
1577 (PSParallelCompact::total_invocations() % | |
1578 VerifyParallelOldWithMarkSweepInterval) == 0) { | |
1579 verify_mark_bitmap(_mark_bitmap); | |
1580 } | |
1581 if (TraceParallelOldGCMarkingPhase) { | |
1582 tty->print_cr("add_obj_count=" SIZE_FORMAT " " | |
1583 "add_obj_bytes=" SIZE_FORMAT, | |
1584 add_obj_count, add_obj_size * HeapWordSize); | |
1585 tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " " | |
1586 "mark_bitmap_bytes=" SIZE_FORMAT, | |
1587 mark_bitmap_count, mark_bitmap_size * HeapWordSize); | |
1588 } | |
1589 #endif // #ifdef ASSERT | |
1590 | |
1591 // Quick summarization of each space into itself, to see how much is live. | |
1592 summarize_spaces_quick(); | |
1593 | |
1594 if (TraceParallelOldGCSummaryPhase) { | |
1595 tty->print_cr("summary_phase: after summarizing each space to self"); | |
1596 Universe::print(); | |
1597 NOT_PRODUCT(print_chunk_ranges()); | |
1598 if (Verbose) { | |
1599 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info)); | |
1600 } | |
1601 } | |
1602 | |
1603 // The amount of live data that will end up in old space (assuming it fits). | |
1604 size_t old_space_total_live = 0; | |
1605 unsigned int id; | |
1606 for (id = old_space_id; id < last_space_id; ++id) { | |
1607 old_space_total_live += pointer_delta(_space_info[id].new_top(), | |
1608 _space_info[id].space()->bottom()); | |
1609 } | |
1610 | |
1611 const MutableSpace* old_space = _space_info[old_space_id].space(); | |
1612 if (old_space_total_live > old_space->capacity_in_words()) { | |
1613 // XXX - should also try to expand | |
1614 maximum_compaction = true; | |
1615 } else if (!UseParallelOldGCDensePrefix) { | |
1616 maximum_compaction = true; | |
1617 } | |
1618 | |
1619 // Permanent and Old generations. | |
1620 summarize_space(perm_space_id, maximum_compaction); | |
1621 summarize_space(old_space_id, maximum_compaction); | |
1622 | |
1623 // Summarize the remaining spaces (those in the young gen) into old space. If | |
1624 // the live data from a space doesn't fit, the existing summarization is left | |
1625 // intact, so the data is compacted down within the space itself. | |
1626 HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr(); | |
1627 HeapWord* const target_space_end = old_space->end(); | |
1628 for (id = eden_space_id; id < last_space_id; ++id) { | |
1629 const MutableSpace* space = _space_info[id].space(); | |
1630 const size_t live = pointer_delta(_space_info[id].new_top(), | |
1631 space->bottom()); | |
1632 const size_t available = pointer_delta(target_space_end, *new_top_addr); | |
1633 if (live <= available) { | |
1634 // All the live data will fit. | |
1635 if (TraceParallelOldGCSummaryPhase) { | |
1636 tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT, | |
1637 id, *new_top_addr); | |
1638 } | |
1639 _summary_data.summarize(*new_top_addr, target_space_end, | |
1640 space->bottom(), space->top(), | |
1641 new_top_addr); | |
1642 | |
1643 // Reset the new_top value for the space. | |
1644 _space_info[id].set_new_top(space->bottom()); | |
1645 | |
1646 // Clear the source_chunk field for each chunk in the space. | |
1647 ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom()); | |
1648 ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1); | |
1649 while (beg_chunk <= end_chunk) { | |
1650 beg_chunk->set_source_chunk(0); | |
1651 ++beg_chunk; | |
1652 } | |
1653 } | |
1654 } | |
1655 | |
1656 // Fill in the block data after any changes to the chunks have | |
1657 // been made. | |
1658 #ifdef ASSERT | |
1659 summarize_blocks(cm, perm_space_id); | |
1660 summarize_blocks(cm, old_space_id); | |
1661 #else | |
1662 if (!UseParallelOldGCChunkPointerCalc) { | |
1663 summarize_blocks(cm, perm_space_id); | |
1664 summarize_blocks(cm, old_space_id); | |
1665 } | |
1666 #endif | |
1667 | |
1668 if (TraceParallelOldGCSummaryPhase) { | |
1669 tty->print_cr("summary_phase: after final summarization"); | |
1670 Universe::print(); | |
1671 NOT_PRODUCT(print_chunk_ranges()); | |
1672 if (Verbose) { | |
1673 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info)); | |
1674 } | |
1675 } | |
1676 } | |
1677 | |
1678 // Fill in the BlockData. | |
1679 // Iterate over the spaces and within each space iterate over | |
1680 // the chunks and fill in the BlockData for each chunk. | |
1681 | |
1682 void PSParallelCompact::summarize_blocks(ParCompactionManager* cm, | |
1683 SpaceId first_compaction_space_id) { | |
1684 #if 0 | |
1685 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);) | |
1686 for (SpaceId cur_space_id = first_compaction_space_id; | |
1687 cur_space_id != last_space_id; | |
1688 cur_space_id = next_compaction_space_id(cur_space_id)) { | |
1689 // Iterate over the chunks in the space | |
1690 size_t start_chunk_index = | |
1691 _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom()); | |
1692 BitBlockUpdateClosure bbu(mark_bitmap(), | |
1693 cm, | |
1694 start_chunk_index); | |
1695 // Iterate over the chunks; the blocks are filled in within each chunk. |
1696 for (size_t chunk_index = start_chunk_index; | |
1697 chunk_index < _summary_data.chunk_count() && | |
1698 _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top(); | |
1699 chunk_index++) { | |
1700 | |
1701 // Reset the closure for the new chunk. Note that the closure | |
1702 // maintains some data that does not get reset for each chunk | |
1703 // so a new instance of the closure is not appropriate. |
1704 bbu.reset_chunk(chunk_index); | |
1705 | |
1706 // Start the iteration with the first live object. This | |
1707 // may return the end of the chunk. That is acceptable since | |
1708 // it will properly limit the iterations. | |
1709 ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit( | |
1710 _summary_data.first_live_or_end_in_chunk(chunk_index)); | |
1711 | |
1712 // End the iteration at the end of the chunk. | |
1713 HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index); | |
1714 HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize; | |
1715 ParMarkBitMap::idx_t right_offset = | |
1716 mark_bitmap()->addr_to_bit(chunk_end); | |
1717 | |
1718 // Blocks that have no objects starting in them can be |
1719 // skipped because their data will never be used. | |
1720 if (left_offset < right_offset) { | |
1721 | |
1722 // Iterate through the objects in the chunk. | |
1723 ParMarkBitMap::idx_t last_offset = | |
1724 mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset); | |
1725 | |
1726 // If last_offset is less than right_offset, then the iteration |
1727 // terminated while looking for an end bit. "last_offset" |
1728 // is then the offset of the last start bit. In this situation |
1729 // the "offset" field for the next block to the right (_cur_block + 1) |
1730 // will not have been updated, although there may be live data |
1731 // to the left of the chunk. |
1732 | |
1733 size_t cur_block_plus_1 = bbu.cur_block() + 1; | |
1734 HeapWord* cur_block_plus_1_addr = | |
1735 _summary_data.block_to_addr(bbu.cur_block()) + | |
1736 ParallelCompactData::BlockSize; | |
1737 HeapWord* last_offset_addr = mark_bitmap()->bit_to_addr(last_offset); | |
1738 #if 1 // This code works. The #else branch should also work, but does not. Why not? |
1739 // The current block (cur_block()) has already been updated. | |
1740 // The last block that may need to be updated is either the | |
1741 // next block (current block + 1) or the block where the | |
1742 // last object starts (which can be greater than the | |
1743 // next block if there were no objects found in intervening | |
1744 // blocks). | |
1745 size_t last_block = | |
1746 MAX2(bbu.cur_block() + 1, | |
1747 _summary_data.addr_to_block_idx(last_offset_addr)); | |
1748 #else | |
1749 // The current block has already been updated. The only block | |
1750 // that remains to be updated is the block where the last | |
1751 // object in the chunk starts. | |
1752 size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr); | |
1753 #endif | |
1754 assert_bit_is_start(last_offset); | |
1755 assert((last_block == _summary_data.block_count()) || | |
1756 (_summary_data.block(last_block)->raw_offset() == 0), | |
1757 "Should not have been set"); | |
1758 // Is the last block still in the current chunk? If still | |
1759 // in this chunk, update the last block (the counting that | |
1760 // included the current block is meant for the offset of the last | |
1761 // block). If not in this chunk, do nothing. Should not | |
1762 // update a block in the next chunk. | |
1763 if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(), | |
1764 last_block)) { | |
1765 if (last_offset < right_offset) { | |
1766 // The last object started in this chunk but ends beyond | |
1767 // this chunk. Update the block for this last object. | |
1768 assert(mark_bitmap()->is_marked(last_offset), "Should be marked"); | |
1769 // No end bit was found. The closure takes care of | |
1770 // the cases where | |
1771 // an object crosses over into the next block |
1772 // an object starts and ends in the next block |
1773 // It does not handle the case where an object is | |
1774 // the first object in a later block and extends | |
1775 // past the end of the chunk (i.e., the closure | |
1776 // only handles complete objects that are in the range | |
1777 // it is given). That object is handed back here | |
1778 // for any special consideration necessary. | |
1779 // | |
1780 // Is the first bit in the last block a start or end bit? | |
1781 // | |
1782 // If the partial object ends in the last block L, | |
1783 // then the 1st bit in L may be an end bit. | |
1784 // | |
1785 // Else does the last object start in a block after the current | |
1786 // block? A block AA will already have been updated if an | |
1787 // object ends in the next block AA+1. An object found to end in | |
1788 // AA+1 is the trigger that updates AA. Objects are being |
1789 // counted in the current block for updating a following |
1790 // block. An object may start in a later block |
1791 // but may extend beyond the last block in the chunk. |
1792 // Updates are only done when the end of an object has been | |
1793 // found. If the last object (covered by block L) starts | |
1794 // beyond the current block, then no object ends in L (otherwise | |
1795 // L would be the current block). So the first bit in L is | |
1796 // a start bit. | |
1797 // | |
1798 // Else the last object starts in the current block and ends |
1799 // beyond the chunk. The current block has already been | |
1800 // updated and there is no later block (with an object | |
1801 // starting in it) that needs to be updated. | |
1802 // | |
1803 if (_summary_data.partial_obj_ends_in_block(last_block)) { | |
1804 _summary_data.block(last_block)->set_end_bit_offset( | |
1805 bbu.live_data_left()); | |
1806 } else if (last_offset_addr >= cur_block_plus_1_addr) { | |
1807 // The start of the object is in a later block |
1808 // (to the right of the current block) and there are no |
1809 // complete live objects to the left of this last object |
1810 // within the chunk. |
1811 // The first bit in the block is for the start of the | |
1812 // last object. | |
1813 _summary_data.block(last_block)->set_start_bit_offset( | |
1814 bbu.live_data_left()); | |
1815 } else { | |
1816 // The start of the last object was found in | |
1817 // the current chunk (which has already | |
1818 // been updated). | |
1819 assert(bbu.cur_block() == | |
1820 _summary_data.addr_to_block_idx(last_offset_addr), | |
1821 "Should be a block already processed"); | |
1822 } | |
1823 #ifdef ASSERT | |
1824 // Is there enough block information to find this object? | |
1825 // The destination of the chunk has not been set so the | |
1826 // values returned by chunk_calc_new_pointer() and |
1827 // calc_new_pointer() will only be |
1828 // offsets. But they should agree. |
1829 HeapWord* moved_obj_with_chunks = | |
1830 _summary_data.chunk_calc_new_pointer(last_offset_addr); | |
1831 HeapWord* moved_obj_with_blocks = | |
1832 _summary_data.calc_new_pointer(last_offset_addr); | |
1833 assert(moved_obj_with_chunks == moved_obj_with_blocks, | |
1834 "Block calculation is wrong"); | |
1835 #endif | |
1836 } else if (last_block < _summary_data.block_count()) { | |
1837 // Iterations ended looking for a start bit (but | |
1838 // did not run off the end of the block table). | |
1839 _summary_data.block(last_block)->set_start_bit_offset( | |
1840 bbu.live_data_left()); | |
1841 } | |
1842 } | |
1843 #ifdef ASSERT | |
1844 // Is there enough block information to find this object? | |
1845 HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset); | |
1846 HeapWord* moved_obj_with_chunks = |
1847 _summary_data.chunk_calc_new_pointer(left_offset_addr); |
1848 HeapWord* moved_obj_with_blocks = | |
1849 _summary_data.calc_new_pointer(left_offset_addr); | |
1850 assert(moved_obj_with_chunks == moved_obj_with_blocks, | |
1851 "Block calculation is wrong"); | |
1852 #endif | |
1853 | |
1854 // Is there another block after the end of this chunk? | |
1855 #ifdef ASSERT | |
1856 if (last_block < _summary_data.block_count()) { | |
1857 // No object may have been found in a block. If that | |
1858 // block is at the end of the chunk, the iteration will | |
1859 // terminate without incrementing the current block so | |
1860 // that the current block is not the last block in the | |
1861 // chunk. That situation precludes asserting that the | |
1862 // current block is the last block in the chunk. Assert | |
1863 // the lesser condition that the current block does not | |
1864 // exceed the chunk. | |
1865 assert(_summary_data.block_to_addr(last_block) <= | |
1866 (_summary_data.chunk_to_addr(chunk_index) + | |
1867 ParallelCompactData::ChunkSize), | |
1868 "Chunk and block inconsistency"); | |
1869 assert(last_offset <= right_offset, "Iteration over ran end"); | |
1870 } | |
1871 #endif | |
1872 } | |
1873 #ifdef ASSERT | |
1874 if (PrintGCDetails && Verbose) { | |
1875 if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) { | |
1876 size_t first_block = | |
1877 chunk_index / ParallelCompactData::BlocksPerChunk; | |
1878 gclog_or_tty->print_cr("first_block " PTR_FORMAT | |
1879 " _offset " PTR_FORMAT | |
1880 "_first_is_start_bit %d", | |
1881 first_block, | |
1882 _summary_data.block(first_block)->raw_offset(), | |
1883 _summary_data.block(first_block)->first_is_start_bit()); | |
1884 } | |
1885 } | |
1886 #endif | |
1887 } | |
1888 } | |
1889 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(16);) | |
1890 #endif // #if 0 | |
1891 } | |
1892 | |
1893 // This method should contain all heap-specific policy for invoking a full | |
1894 // collection. invoke_no_policy() will only attempt to compact the heap; it | |
1895 // will do nothing further. If we need to bail out for policy reasons, scavenge | |
1896 // before full gc, or any other specialized behavior, it needs to be added here. | |
1897 // | |
1898 // Note that this method should only be called from the vm_thread while at a | |
1899 // safepoint. | |
1900 void PSParallelCompact::invoke(bool maximum_heap_compaction) { | |
1901 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
1902 assert(Thread::current() == (Thread*)VMThread::vm_thread(), | |
1903 "should be in vm thread"); | |
1904 ParallelScavengeHeap* heap = gc_heap(); | |
1905 GCCause::Cause gc_cause = heap->gc_cause(); | |
1906 assert(!heap->is_gc_active(), "not reentrant"); | |
1907 | |
1908 PSAdaptiveSizePolicy* policy = heap->size_policy(); | |
1909 | |
1910 // Before each allocation/collection attempt, find out from the | |
1911 // policy object if GCs are, on the whole, taking too long. If so, | |
1912 // bail out without attempting a collection. The exceptions are | |
1913 // for explicitly requested GC's. | |
1914 if (!policy->gc_time_limit_exceeded() || | |
1915 GCCause::is_user_requested_gc(gc_cause) || | |
1916 GCCause::is_serviceability_requested_gc(gc_cause)) { | |
1917 IsGCActiveMark mark; | |
1918 | |
1919 if (ScavengeBeforeFullGC) { | |
1920 PSScavenge::invoke_no_policy(); | |
1921 } | |
1922 | |
1923 PSParallelCompact::invoke_no_policy(maximum_heap_compaction); | |
1924 } | |
1925 } | |
1926 | |
1927 bool ParallelCompactData::chunk_contains(size_t chunk_index, HeapWord* addr) { | |
1928 size_t addr_chunk_index = addr_to_chunk_idx(addr); | |
1929 return chunk_index == addr_chunk_index; | |
1930 } | |
1931 | |
1932 bool ParallelCompactData::chunk_contains_block(size_t chunk_index, | |
1933 size_t block_index) { | |
1934 size_t first_block_in_chunk = chunk_index * BlocksPerChunk; | |
1935 size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1; | |
1936 | |
1937 return (first_block_in_chunk <= block_index) && | |
1938 (block_index <= last_block_in_chunk); | |
1939 } | |
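
// For example, with BlocksPerChunk == 4, chunk 3 covers blocks 12 through 15,
// so chunk_contains_block(3, 12) and chunk_contains_block(3, 15) are true
// while chunk_contains_block(3, 16) is false.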
1940 | |
1941 // This method contains no policy. You should probably | |
1942 // be calling invoke() instead. | |
1943 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { | |
1944 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); | |
1945 assert(ref_processor() != NULL, "Sanity"); | |
1946 | |
139 | 1947 if (GC_locker::check_active_before_gc()) { |
0 | 1948 return; |
1949 } | |
1950 | |
1951 TimeStamp marking_start; | |
1952 TimeStamp compaction_start; | |
1953 TimeStamp collection_exit; | |
1954 | |
1955 ParallelScavengeHeap* heap = gc_heap(); | |
1956 GCCause::Cause gc_cause = heap->gc_cause(); | |
1957 PSYoungGen* young_gen = heap->young_gen(); | |
1958 PSOldGen* old_gen = heap->old_gen(); | |
1959 PSPermGen* perm_gen = heap->perm_gen(); | |
1960 PSAdaptiveSizePolicy* size_policy = heap->size_policy(); | |
1961 | |
1962 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes; | |
1963 | |
1964 // Make sure data structures are sane, make the heap parsable, and do other | |
1965 // miscellaneous bookkeeping. | |
1966 PreGCValues pre_gc_values; | |
1967 pre_compact(&pre_gc_values); | |
1968 | |
210 | 1969 // Get the compaction manager reserved for the VM thread. |
1970 ParCompactionManager* const vmthread_cm = | |
1971 ParCompactionManager::manager_array(gc_task_manager()->workers()); | |
1972 | |
0 | 1973 // Place after pre_compact() where the number of invocations is incremented. |
1974 AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); | |
1975 | |
1976 { | |
1977 ResourceMark rm; | |
1978 HandleMark hm; | |
1979 | |
1980 const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc; | |
1981 | |
1982 // This is useful for debugging but don't change the output the | |
1983 // customer sees. |
1984 const char* gc_cause_str = "Full GC"; | |
1985 if (is_system_gc && PrintGCDetails) { | |
1986 gc_cause_str = "Full GC (System)"; | |
1987 } | |
1988 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
1989 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
1990 TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty); | |
1991 TraceCollectorStats tcs(counters()); | |
1992 TraceMemoryManagerStats tms(true /* Full GC */); | |
1993 | |
1994 if (TraceGen1Time) accumulated_time()->start(); | |
1995 | |
1996 // Let the size policy know we're starting | |
1997 size_policy->major_collection_begin(); | |
1998 | |
1999 // When collecting the permanent generation methodOops may be moving, | |
2000 // so we either have to flush all bcp data or convert it into bci. | |
2001 CodeCache::gc_prologue(); | |
2002 Threads::gc_prologue(); | |
2003 | |
2004 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
2005 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
2006 | |
2007 ref_processor()->enable_discovery(); | |
2008 | |
2009 bool marked_for_unloading = false; | |
2010 | |
2011 marking_start.update(); | |
210 | 2012 marking_phase(vmthread_cm, maximum_heap_compaction); |
0 | 2013 |
2014 #ifndef PRODUCT | |
2015 if (TraceParallelOldGCMarkingPhase) { | |
2016 gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d " | |
2017 "cas_by_another %d", | |
2018 mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(), | |
2019 mark_bitmap()->cas_by_another()); | |
2020 } | |
2021 #endif // #ifndef PRODUCT | |
2022 | |
2023 #ifdef ASSERT | |
2024 if (VerifyParallelOldWithMarkSweep && | |
2025 (PSParallelCompact::total_invocations() % | |
2026 VerifyParallelOldWithMarkSweepInterval) == 0) { | |
2027 gclog_or_tty->print_cr("Verify marking with mark_sweep_phase1()"); | |
2028 if (PrintGCDetails && Verbose) { | |
2029 gclog_or_tty->print_cr("mark_sweep_phase1:"); | |
2030 } | |
2031 // Clear the discovered lists so that discovered objects | |
2032 // don't look like they have been discovered twice. | |
2033 ref_processor()->clear_discovered_references(); | |
2034 | |
2035 PSMarkSweep::allocate_stacks(); | |
2036 MemRegion mr = Universe::heap()->reserved_region(); | |
2037 PSMarkSweep::ref_processor()->enable_discovery(); | |
2038 PSMarkSweep::mark_sweep_phase1(maximum_heap_compaction); | |
2039 } | |
2040 #endif | |
2041 | |
2042 bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc; | |
210 | 2043 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc); |
0 | 2044 |
2045 #ifdef ASSERT | |
2046 if (VerifyParallelOldWithMarkSweep && | |
2047 (PSParallelCompact::total_invocations() % | |
2048 VerifyParallelOldWithMarkSweepInterval) == 0) { | |
2049 if (PrintGCDetails && Verbose) { | |
2050 gclog_or_tty->print_cr("mark_sweep_phase2:"); | |
2051 } | |
2052 PSMarkSweep::mark_sweep_phase2(); | |
2053 } | |
2054 #endif | |
2055 | |
2056 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity")); | |
2057 COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); | |
2058 | |
2059 // adjust_roots() updates Universe::_intArrayKlassObj which is | |
2060 // needed by the compaction for filling holes in the dense prefix. | |
2061 adjust_roots(); | |
2062 | |
2063 #ifdef ASSERT | |
2064 if (VerifyParallelOldWithMarkSweep && | |
2065 (PSParallelCompact::total_invocations() % | |
2066 VerifyParallelOldWithMarkSweepInterval) == 0) { | |
2067 // Do a separate verify phase so that the verify | |
2069 // code can use the forwarding pointers to |
2069 // check the new pointer calculation. The restore_marks() | |
2070 // has to be done before the real compact. | |
210 | 2071 vmthread_cm->set_action(ParCompactionManager::VerifyUpdate); |
2072 compact_perm(vmthread_cm); | |
2073 compact_serial(vmthread_cm); | |
2074 vmthread_cm->set_action(ParCompactionManager::ResetObjects); | |
2075 compact_perm(vmthread_cm); | |
2076 compact_serial(vmthread_cm); | |
2077 vmthread_cm->set_action(ParCompactionManager::UpdateAndCopy); | |
0 | 2078 |
2079 // For debugging only | |
2080 PSMarkSweep::restore_marks(); | |
2081 PSMarkSweep::deallocate_stacks(); | |
2082 } | |
2083 #endif | |
2084 | |
2085 compaction_start.update(); | |
2086 // Does the perm gen always have to be done serially because | |
2087 // klasses are used in the update of an object? | |
210 | 2088 compact_perm(vmthread_cm); |
0 | 2089 |
2090 if (UseParallelOldGCCompacting) { | |
2091 compact(); | |
2092 } else { | |
210 | 2093 compact_serial(vmthread_cm); |
0 | 2094 } |
2095 | |
2096 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be | |
2097 // done before resizing. | |
2098 post_compact(); | |
2099 | |
2100 // Let the size policy know we're done | |
2101 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause); | |
2102 | |
2103 if (UseAdaptiveSizePolicy) { | |
2104 if (PrintAdaptiveSizePolicy) { | |
2105 gclog_or_tty->print("AdaptiveSizeStart: "); | |
2106 gclog_or_tty->stamp(); | |
2107 gclog_or_tty->print_cr(" collection: %d ", | |
2108 heap->total_collections()); | |
2109 if (Verbose) { | |
2110 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d" | |
2111 " perm_gen_capacity: %d ", | |
2112 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(), | |
2113 perm_gen->capacity_in_bytes()); | |
2114 } | |
2115 } | |
2116 | |
2117 // Don't check if the size_policy is ready here. Let | |
2118 // the size_policy check that internally. | |
2119 if (UseAdaptiveGenerationSizePolicyAtMajorCollection && | |
2120 ((gc_cause != GCCause::_java_lang_system_gc) || | |
2121 UseAdaptiveSizePolicyWithSystemGC)) { | |
2122 // Calculate optimal free space amounts | |
2123 assert(young_gen->max_size() > | |
2124 young_gen->from_space()->capacity_in_bytes() + | |
2125 young_gen->to_space()->capacity_in_bytes(), | |
2126 "Sizes of space in young gen are out-of-bounds"); | |
2127 size_t max_eden_size = young_gen->max_size() - | |
2128 young_gen->from_space()->capacity_in_bytes() - | |
2129 young_gen->to_space()->capacity_in_bytes(); | |
2130 size_policy->compute_generation_free_space(young_gen->used_in_bytes(), | |
2131 young_gen->eden_space()->used_in_bytes(), | |
2132 old_gen->used_in_bytes(), | |
2133 perm_gen->used_in_bytes(), | |
2134 young_gen->eden_space()->capacity_in_bytes(), | |
2135 old_gen->max_gen_size(), | |
2136 max_eden_size, | |
2137 true /* full gc*/, | |
2138 gc_cause); | |
2139 | |
2140 heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes()); | |
2141 | |
2142 // Don't resize the young generation at a major collection. A |
2143 // desired young generation size may have been calculated but | |
2144 // resizing the young generation complicates the code because the | |
2145 // resizing of the old generation may have moved the boundary | |
2146 // between the young generation and the old generation. Let the | |
2147 // young generation resizing happen at the minor collections. | |
2148 } | |
2149 if (PrintAdaptiveSizePolicy) { | |
2150 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ", | |
2151 heap->total_collections()); | |
2152 } | |
2153 } | |
2154 | |
2155 if (UsePerfData) { | |
2156 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters(); | |
2157 counters->update_counters(); | |
2158 counters->update_old_capacity(old_gen->capacity_in_bytes()); | |
2159 counters->update_young_capacity(young_gen->capacity_in_bytes()); | |
2160 } | |
2161 | |
2162 heap->resize_all_tlabs(); | |
2163 | |
2164 // We collected the perm gen, so we'll resize it here. | |
2165 perm_gen->compute_new_size(pre_gc_values.perm_gen_used()); | |
2166 | |
2167 if (TraceGen1Time) accumulated_time()->stop(); | |
2168 | |
2169 if (PrintGC) { | |
2170 if (PrintGCDetails) { | |
2171 // No GC timestamp here. This is after GC so it would be confusing. | |
2172 young_gen->print_used_change(pre_gc_values.young_gen_used()); | |
2173 old_gen->print_used_change(pre_gc_values.old_gen_used()); | |
2174 heap->print_heap_change(pre_gc_values.heap_used()); | |
2175 // Print perm gen last (print_heap_change() excludes the perm gen). | |
2176 perm_gen->print_used_change(pre_gc_values.perm_gen_used()); | |
2177 } else { | |
2178 heap->print_heap_change(pre_gc_values.heap_used()); | |
2179 } | |
2180 } | |
2181 | |
2182 // Track memory usage and detect low memory | |
2183 MemoryService::track_memory_usage(); | |
2184 heap->update_counters(); | |
2185 | |
2186 if (PrintGCDetails) { | |
2187 if (size_policy->print_gc_time_limit_would_be_exceeded()) { | |
2188 if (size_policy->gc_time_limit_exceeded()) { | |
2189 gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit " | |
2190 "of %d%%", GCTimeLimit); | |
2191 } else { | |
2192 gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit " | |
2193 "of %d%%", GCTimeLimit); | |
2194 } | |
2195 } | |
2196 size_policy->set_print_gc_time_limit_would_be_exceeded(false); | |
2197 } | |
2198 } | |
2199 | |
2200 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { | |
2201 HandleMark hm; // Discard invalid handles created during verification | |
2202 gclog_or_tty->print(" VerifyAfterGC:"); | |
2203 Universe::verify(false); | |
2204 } | |
2205 | |
2206 // Re-verify object start arrays | |
2207 if (VerifyObjectStartArray && | |
2208 VerifyAfterGC) { | |
2209 old_gen->verify_object_start_array(); | |
2210 perm_gen->verify_object_start_array(); | |
2211 } | |
2212 | |
2213 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
2214 | |
2215 collection_exit.update(); | |
2216 | |
2217 if (PrintHeapAtGC) { | |
2218 Universe::print_heap_after_gc(); | |
2219 } | |
2220 if (PrintGCTaskTimeStamps) { | |
2221 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " | |
2222 INT64_FORMAT, | |
2223 marking_start.ticks(), compaction_start.ticks(), | |
2224 collection_exit.ticks()); | |
2225 gc_task_manager()->print_task_time_stamps(); | |
2226 } | |
2227 } | |
2228 | |
2229 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy, | |
2230 PSYoungGen* young_gen, | |
2231 PSOldGen* old_gen) { | |
2232 MutableSpace* const eden_space = young_gen->eden_space(); | |
2233 assert(!eden_space->is_empty(), "eden must be non-empty"); | |
2234 assert(young_gen->virtual_space()->alignment() == | |
2235 old_gen->virtual_space()->alignment(), "alignments do not match"); | |
2236 | |
2237 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) { | |
2238 return false; | |
2239 } | |
2240 | |
2241 // Both generations must be completely committed. | |
2242 if (young_gen->virtual_space()->uncommitted_size() != 0) { | |
2243 return false; | |
2244 } | |
2245 if (old_gen->virtual_space()->uncommitted_size() != 0) { | |
2246 return false; | |
2247 } | |
2248 | |
2249 // Figure out how much to take from eden. Include the average amount promoted | |
2250 // in the total; otherwise the next young gen GC will simply bail out to a | |
2251 // full GC. | |
2252 const size_t alignment = old_gen->virtual_space()->alignment(); | |
2253 const size_t eden_used = eden_space->used_in_bytes(); | |
2254 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average(); | |
2255 const size_t absorb_size = align_size_up(eden_used + promoted, alignment); | |
2256 const size_t eden_capacity = eden_space->capacity_in_bytes(); | |
2257 | |
2258 if (absorb_size >= eden_capacity) { | |
2259 return false; // Must leave some space in eden. | |
2260 } | |
2261 | |
2262 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size; | |
2263 if (new_young_size < young_gen->min_gen_size()) { | |
2264 return false; // Respect young gen minimum size. | |
2265 } | |
2266 | |
2267 if (TraceAdaptiveGCBoundary && Verbose) { | |
2268 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: " | |
2269 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K " | |
2270 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K " | |
2271 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ", | |
2272 absorb_size / K, | |
2273 eden_capacity / K, (eden_capacity - absorb_size) / K, | |
2274 young_gen->from_space()->used_in_bytes() / K, | |
2275 young_gen->to_space()->used_in_bytes() / K, | |
2276 young_gen->capacity_in_bytes() / K, new_young_size / K); | |
2277 } | |
2278 | |
2279 // Fill the unused part of the old gen. | |
2280 MutableSpace* const old_space = old_gen->object_space(); | |
2281 MemRegion old_gen_unused(old_space->top(), old_space->end()); | |
2282 if (!old_gen_unused.is_empty()) { | |
2283 SharedHeap::fill_region_with_object(old_gen_unused); | |
2284 } | |
2285 | |
2286 // Take the live data from eden and set both top and end in the old gen to | |
2287 // eden top. (Need to set end because reset_after_change() mangles the region | |
2288 // from end to virtual_space->high() in debug builds). | |
2289 HeapWord* const new_top = eden_space->top(); | |
2290 old_gen->virtual_space()->expand_into(young_gen->virtual_space(), | |
2291 absorb_size); | |
2292 young_gen->reset_after_change(); | |
2293 old_space->set_top(new_top); | |
2294 old_space->set_end(new_top); | |
2295 old_gen->reset_after_change(); | |
2296 | |
2297 // Update the object start array for the filler object and the data from eden. | |
2298 ObjectStartArray* const start_array = old_gen->start_array(); | |
2299 HeapWord* const start = old_gen_unused.start(); | |
2300 for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) { | |
2301 start_array->allocate_block(addr); | |
2302 } | |
2303 | |
2304 // Could update the promoted average here, but it is not typically updated at | |
2305 // full GCs and the value to use is unclear. Something like | |
2306 // | |
2307 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc. | |
2308 | |
2309 size_policy->set_bytes_absorbed_from_eden(absorb_size); | |
2310 return true; | |
2311 } | |
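
// Worked example with illustrative numbers: if eden_used is 64M, the padded
// average promoted is 8M and the generation alignment is 64K, absorb_size is
// align_size_up(72M, 64K) == 72M. The absorb proceeds only if eden_capacity
// exceeds 72M and shrinking the young gen by 72M stays above min_gen_size().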
2312 | |
2313 GCTaskManager* const PSParallelCompact::gc_task_manager() { | |
2314 assert(ParallelScavengeHeap::gc_task_manager() != NULL, | |
2315 "shouldn't return NULL"); | |
2316 return ParallelScavengeHeap::gc_task_manager(); | |
2317 } | |
2318 | |
2319 void PSParallelCompact::marking_phase(ParCompactionManager* cm, | |
2320 bool maximum_heap_compaction) { | |
2321 // Recursively traverse all live objects and mark them | |
2322 EventMark m("1 mark object"); | |
2323 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty); | |
2324 | |
2325 ParallelScavengeHeap* heap = gc_heap(); | |
2326 uint parallel_gc_threads = heap->gc_task_manager()->workers(); | |
2327 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array(); | |
2328 ParallelTaskTerminator terminator(parallel_gc_threads, qset); | |
2329 | |
2330 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); | |
2331 PSParallelCompact::FollowStackClosure follow_stack_closure(cm); | |
2332 | |
2333 { | |
2334 TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty); | |
2335 | |
2336 GCTaskQueue* q = GCTaskQueue::create(); | |
2337 | |
2338 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe)); | |
2339 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles)); | |
2340 // We scan the thread roots in parallel | |
2341 Threads::create_thread_roots_marking_tasks(q); | |
2342 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer)); | |
2343 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler)); | |
2344 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management)); | |
2345 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary)); | |
2346 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti)); | |
2347 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols)); | |
2348 | |
2349 if (parallel_gc_threads > 1) { | |
2350 for (uint j = 0; j < parallel_gc_threads; j++) { | |
2351 q->enqueue(new StealMarkingTask(&terminator)); | |
2352 } | |
2353 } | |
2354 | |
2355 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create(); | |
2356 q->enqueue(fin); | |
2357 | |
2358 gc_task_manager()->add_list(q); | |
2359 | |
2360 fin->wait_for(); | |
2361 | |
2362 // We have to release the barrier tasks! | |
2363 WaitForBarrierGCTask::destroy(fin); | |
2364 } | |
2365 | |
2366 // Process reference objects found during marking | |
2367 { | |
2368 TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty); | |
2369 ReferencePolicy *soft_ref_policy; | |
2370 if (maximum_heap_compaction) { | |
2371 soft_ref_policy = new AlwaysClearPolicy(); | |
2372 } else { | |
2373 #ifdef COMPILER2 | |
2374 soft_ref_policy = new LRUMaxHeapPolicy(); | |
2375 #else | |
2376 soft_ref_policy = new LRUCurrentHeapPolicy(); | |
2377 #endif // COMPILER2 | |
2378 } | |
2379 assert(soft_ref_policy != NULL, "No soft reference policy"); | |
2380 if (ref_processor()->processing_is_mt()) { | |
2381 RefProcTaskExecutor task_executor; | |
2382 ref_processor()->process_discovered_references( | |
2383 soft_ref_policy, is_alive_closure(), &mark_and_push_closure, | |
2384 &follow_stack_closure, &task_executor); | |
2385 } else { | |
2386 ref_processor()->process_discovered_references( | |
2387 soft_ref_policy, is_alive_closure(), &mark_and_push_closure, | |
2388 &follow_stack_closure, NULL); | |
2389 } | |
2390 } | |
2391 | |
2392 TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty); | |
2393 // Follow system dictionary roots and unload classes. | |
2394 bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); | |
2395 | |
2396 // Follow code cache roots. | |
2397 CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure, | |
2398 purged_class); | |
2399 follow_stack(cm); // Flush marking stack. | |
2400 | |
2401 // Update subklass/sibling/implementor links of live klasses | |
2402 // revisit_klass_stack is used in follow_weak_klass_links(). | |
2403 follow_weak_klass_links(cm); | |
2404 | |
2405 // Visit symbol and interned string tables and delete unmarked oops | |
2406 SymbolTable::unlink(is_alive_closure()); | |
2407 StringTable::unlink(is_alive_closure()); | |
2408 | |
2409 assert(cm->marking_stack()->size() == 0, "stack should be empty by now"); | |
2410 assert(cm->overflow_stack()->is_empty(), "stack should be empty by now"); | |
2411 } | |
2412 | |
2413 // This should be moved to the shared markSweep code! | |
2414 class PSAlwaysTrueClosure: public BoolObjectClosure { | |
2415 public: | |
2416 void do_object(oop p) { ShouldNotReachHere(); } | |
2417 bool do_object_b(oop p) { return true; } | |
2418 }; | |
2419 static PSAlwaysTrueClosure always_true; | |
2420 | |
2421 void PSParallelCompact::adjust_roots() { | |
2422 // Adjust the pointers to reflect the new locations | |
2423 EventMark m("3 adjust roots"); | |
2424 TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty); | |
2425 | |
2426 // General strong roots. | |
2427 Universe::oops_do(adjust_root_pointer_closure()); | |
2428 ReferenceProcessor::oops_do(adjust_root_pointer_closure()); | |
2429 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles | |
2430 Threads::oops_do(adjust_root_pointer_closure()); | |
2431 ObjectSynchronizer::oops_do(adjust_root_pointer_closure()); | |
2432 FlatProfiler::oops_do(adjust_root_pointer_closure()); | |
2433 Management::oops_do(adjust_root_pointer_closure()); | |
2434 JvmtiExport::oops_do(adjust_root_pointer_closure()); | |
2435 // SO_AllClasses | |
2436 SystemDictionary::oops_do(adjust_root_pointer_closure()); | |
2437 vmSymbols::oops_do(adjust_root_pointer_closure()); | |
2438 | |
2439 // Now adjust pointers in remaining weak roots. (All of which should | |
2440 // have been cleared if they pointed to non-surviving objects.) | |
2441 // Global (weak) JNI handles | |
2442 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure()); | |
2443 | |
2444 CodeCache::oops_do(adjust_pointer_closure()); | |
2445 SymbolTable::oops_do(adjust_root_pointer_closure()); | |
2446 StringTable::oops_do(adjust_root_pointer_closure()); | |
2447 ref_processor()->weak_oops_do(adjust_root_pointer_closure()); | |
2448 // Roots were visited so references into the young gen in roots | |
2449 // may have been scanned. Process them also. | |
2450 // Should the reference processor have a span that excludes | |
2451 // young gen objects? | |
2452 PSScavenge::reference_processor()->weak_oops_do( | |
2453 adjust_root_pointer_closure()); | |
2454 } | |
2455 | |
2456 void PSParallelCompact::compact_perm(ParCompactionManager* cm) { | |
2457 EventMark m("4 compact perm"); | |
2458 TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty); | |
2459 // trace("4"); | |
2460 | |
2461 gc_heap()->perm_gen()->start_array()->reset(); | |
2462 move_and_update(cm, perm_space_id); | |
2463 } | |
2464 | |
2465 void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q, | |
2466 uint parallel_gc_threads) { | |
2467 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty); | |
2468 | |
2469 const unsigned int task_count = MAX2(parallel_gc_threads, 1U); | |
2470 for (unsigned int j = 0; j < task_count; j++) { | |
2471 q->enqueue(new DrainStacksCompactionTask()); | |
2472 } | |
2473 | |
2474 // Find all chunks that are available (can be filled immediately) and | |
2475 // distribute them to the thread stacks. The iteration is done in reverse | |
2476 // order (high to low) so the chunks will be removed in ascending order. | |
2477 | |
2478 const ParallelCompactData& sd = PSParallelCompact::summary_data(); | |
2479 | |
2480 size_t fillable_chunks = 0; // A count for diagnostic purposes. | |
2481 unsigned int which = 0; // The worker thread number. | |
2482 | |
2483 for (unsigned int id = to_space_id; id > perm_space_id; --id) { | |
2484 SpaceInfo* const space_info = _space_info + id; | |
2485 MutableSpace* const space = space_info->space(); | |
2486 HeapWord* const new_top = space_info->new_top(); | |
2487 | |
2488 const size_t beg_chunk = sd.addr_to_chunk_idx(space_info->dense_prefix()); | |
2489 const size_t end_chunk = sd.addr_to_chunk_idx(sd.chunk_align_up(new_top)); | |
2490 assert(end_chunk > 0, "perm gen cannot be empty"); | |
2491 | |
2492 for (size_t cur = end_chunk - 1; cur >= beg_chunk; --cur) { | |
2493 if (sd.chunk(cur)->claim_unsafe()) { | |
2494 ParCompactionManager* cm = ParCompactionManager::manager_array(which); | |
2495 cm->save_for_processing(cur); | |
2496 | |
2497 if (TraceParallelOldGCCompactionPhase && Verbose) { | |
2498 const size_t count_mod_8 = fillable_chunks & 7; | |
2499 if (count_mod_8 == 0) gclog_or_tty->print("fillable: "); | |
2500 gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur); | |
2501 if (count_mod_8 == 7) gclog_or_tty->cr(); | |
2502 } | |
2503 | |
2504 NOT_PRODUCT(++fillable_chunks;) | |
2505 | |
2506 // Assign chunks to threads in round-robin fashion. | |
2507 if (++which == task_count) { | |
2508 which = 0; | |
2509 } | |
2510 } | |
2511 } | |
2512 } | |
2513 | |
2514 if (TraceParallelOldGCCompactionPhase) { | |
2515 if (Verbose && (fillable_chunks & 7) != 0) gclog_or_tty->cr(); | |
2516 gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable chunks", fillable_chunks); | |
2517 } | |
2518 } | |
2519 | |
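// Over-partitioning factor for dense prefix updates: each GC thread may be | |
// given up to this many tasks so that threads which finish early can pick | |
// up additional work, smoothing out load imbalance between chunks. | |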
2520 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4 | |
2521 | |
2522 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q, | |
2523 uint parallel_gc_threads) { | |
2524 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty); | |
2525 | |
2526 ParallelCompactData& sd = PSParallelCompact::summary_data(); | |
2527 | |
2528 // Iterate over all the spaces adding tasks for updating | |
2529 // chunks in the dense prefix. Assume that 1 gc thread | |
2530 // will work on opening the gaps and the remaining gc threads | |
2531 // will work on the dense prefix. | |
2532 SpaceId space_id = old_space_id; | |
2533 while (space_id != last_space_id) { | |
2534 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix(); | |
2535 const MutableSpace* const space = _space_info[space_id].space(); | |
2536 | |
2537 if (dense_prefix_end == space->bottom()) { | |
2538 // There is no dense prefix for this space. | |
2539 space_id = next_compaction_space_id(space_id); | |
2540 continue; | |
2541 } | |
2542 | |
2543 // The dense prefix is before this chunk. | |
2544 size_t chunk_index_end_dense_prefix = | |
2545 sd.addr_to_chunk_idx(dense_prefix_end); | |
2546 ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix); | |
2547 assert(dense_prefix_end == space->end() || | |
2548 dense_prefix_cp->available() || | |
2549 dense_prefix_cp->claimed(), | |
2550 "The chunk after the dense prefix should always be ready to fill"); | |
2551 | |
2552 size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom()); | |
2553 | |
2554 // Is there dense prefix work? | |
2555 size_t total_dense_prefix_chunks = | |
2556 chunk_index_end_dense_prefix - chunk_index_start; | |
2557 // How many chunks of the dense prefix should be given to | |
2558 // each thread? | |
2559 if (total_dense_prefix_chunks > 0) { | |
2560 uint tasks_for_dense_prefix = 1; | |
2561 if (UseParallelDensePrefixUpdate) { | |
2562 if (total_dense_prefix_chunks <= | |
2563 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) { | |
2564 // Don't over partition. This assumes that | |
2565 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value | |
2566 // so there are not many chunks to process. | |
2567 tasks_for_dense_prefix = parallel_gc_threads; | |
2568 } else { | |
2569 // Over partition | |
2570 tasks_for_dense_prefix = parallel_gc_threads * | |
2571 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING; | |
2572 } | |
2573 } | |
2574 size_t chunks_per_thread = total_dense_prefix_chunks / | |
2575 tasks_for_dense_prefix; | |
2576 // Give each thread at least 1 chunk. | |
2577 if (chunks_per_thread == 0) { | |
2578 chunks_per_thread = 1; | |
2579 } | |
2580 | |
2581 for (uint k = 0; k < tasks_for_dense_prefix; k++) { | |
2582 if (chunk_index_start >= chunk_index_end_dense_prefix) { | |
2583 break; | |
2584 } | |
2585 // chunk_index_end is not processed | |
2586 size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread, | |
2587 chunk_index_end_dense_prefix); | |
2588 q->enqueue(new UpdateDensePrefixTask( | |
2589 space_id, | |
2590 chunk_index_start, | |
2591 chunk_index_end)); | |
2592 chunk_index_start = chunk_index_end; | |
2593 } | |
2594 } | |
2595 // This gets any part of the dense prefix that did not | |
2596 // fit evenly. | |
2597 if (chunk_index_start < chunk_index_end_dense_prefix) { | |
2598 q->enqueue(new UpdateDensePrefixTask( | |
2599 space_id, | |
2600 chunk_index_start, | |
2601 chunk_index_end_dense_prefix)); | |
2602 } | |
2603 space_id = next_compaction_space_id(space_id); | |
2604 } // End tasks for dense prefix | |
2605 } | |
2606 | |
2607 void PSParallelCompact::enqueue_chunk_stealing_tasks( | |
2608 GCTaskQueue* q, | |
2609 ParallelTaskTerminator* terminator_ptr, | |
2610 uint parallel_gc_threads) { | |
2611 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty); | |
2612 | |
2613 // Once a thread has drained its stack, it should try to steal chunks from | |
2614 // other threads. | |
2615 if (parallel_gc_threads > 1) { | |
2616 for (uint j = 0; j < parallel_gc_threads; j++) { | |
2617 q->enqueue(new StealChunkCompactionTask(terminator_ptr)); | |
2618 } | |
2619 } | |
2620 } | |
2621 | |
2622 void PSParallelCompact::compact() { | |
2623 EventMark m("5 compact"); | |
2624 // trace("5"); | |
2625 TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty); | |
2626 | |
2627 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | |
2628 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | |
2629 PSOldGen* old_gen = heap->old_gen(); | |
2630 old_gen->start_array()->reset(); | |
2631 uint parallel_gc_threads = heap->gc_task_manager()->workers(); | |
2632 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array(); | |
2633 ParallelTaskTerminator terminator(parallel_gc_threads, qset); | |
2634 | |
2635 GCTaskQueue* q = GCTaskQueue::create(); | |
2636 enqueue_chunk_draining_tasks(q, parallel_gc_threads); | |
2637 enqueue_dense_prefix_tasks(q, parallel_gc_threads); | |
2638 enqueue_chunk_stealing_tasks(q, &terminator, parallel_gc_threads); | |
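// Workers pull tasks from the queue in the order enqueued above: first they | |
// drain the chunk stacks assigned to them, then update dense prefix chunks, | |
// and finally steal chunks from other workers until the terminator offers | |
// termination. | |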
2639 | |
2640 { | |
2641 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty); | |
2642 | |
2643 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create(); | |
2644 q->enqueue(fin); | |
2645 | |
2646 gc_task_manager()->add_list(q); | |
2647 | |
2648 fin->wait_for(); | |
2649 | |
2650 // We have to release the barrier tasks! | |
2651 WaitForBarrierGCTask::destroy(fin); | |
2652 | |
2653 #ifdef ASSERT | |
2654 // Verify that all chunks have been processed before the deferred updates. | |
2655 // Note that perm_space_id is skipped; this type of verification is not | |
2656 // valid until the perm gen is compacted by chunks. | |
2657 for (unsigned int id = old_space_id; id < last_space_id; ++id) { | |
2658 verify_complete(SpaceId(id)); | |
2659 } | |
2660 #endif | |
2661 } | |
2662 | |
2663 { | |
2664 // Update the deferred objects, if any. Any compaction manager can be used. | |
2665 TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty); | |
2666 ParCompactionManager* cm = ParCompactionManager::manager_array(0); | |
2667 for (unsigned int id = old_space_id; id < last_space_id; ++id) { | |
2668 update_deferred_objects(cm, SpaceId(id)); | |
2669 } | |
2670 } | |
2671 } | |
2672 | |
2673 #ifdef ASSERT | |
2674 void PSParallelCompact::verify_complete(SpaceId space_id) { | |
2675 // All Chunks between space bottom() and new_top() should be marked as filled | |
2676 // and all Chunks between new_top() and top() should be available (i.e., | |
2677 // should have been emptied). | |
2678 ParallelCompactData& sd = summary_data(); | |
2679 SpaceInfo si = _space_info[space_id]; | |
2680 HeapWord* new_top_addr = sd.chunk_align_up(si.new_top()); | |
2681 HeapWord* old_top_addr = sd.chunk_align_up(si.space()->top()); | |
2682 const size_t beg_chunk = sd.addr_to_chunk_idx(si.space()->bottom()); | |
2683 const size_t new_top_chunk = sd.addr_to_chunk_idx(new_top_addr); | |
2684 const size_t old_top_chunk = sd.addr_to_chunk_idx(old_top_addr); | |
2685 | |
2686 bool issued_a_warning = false; | |
2687 | |
2688 size_t cur_chunk; | |
2689 for (cur_chunk = beg_chunk; cur_chunk < new_top_chunk; ++cur_chunk) { | |
2690 const ChunkData* const c = sd.chunk(cur_chunk); | |
2691 if (!c->completed()) { | |
2692 warning("chunk " SIZE_FORMAT " not filled: " | |
2693 "destination_count=" SIZE_FORMAT, | |
2694 cur_chunk, c->destination_count()); | |
2695 issued_a_warning = true; | |
2696 } | |
2697 } | |
2698 | |
2699 for (cur_chunk = new_top_chunk; cur_chunk < old_top_chunk; ++cur_chunk) { | |
2700 const ChunkData* const c = sd.chunk(cur_chunk); | |
2701 if (!c->available()) { | |
2702 warning("chunk " SIZE_FORMAT " not empty: " | |
2703 "destination_count=" SIZE_FORMAT, | |
2704 cur_chunk, c->destination_count()); | |
2705 issued_a_warning = true; | |
2706 } | |
2707 } | |
2708 | |
2709 if (issued_a_warning) { | |
2710 print_chunk_ranges(); | |
2711 } | |
2712 } | |
2713 #endif // #ifdef ASSERT | |
2714 | |
2715 void PSParallelCompact::compact_serial(ParCompactionManager* cm) { | |
2716 EventMark m("5 compact serial"); | |
2717 TraceTime tm("compact serial", print_phases(), true, gclog_or_tty); | |
2718 | |
2719 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | |
2720 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | |
2721 | |
2722 PSYoungGen* young_gen = heap->young_gen(); | |
2723 PSOldGen* old_gen = heap->old_gen(); | |
2724 | |
2725 old_gen->start_array()->reset(); | |
2726 old_gen->move_and_update(cm); | |
2727 young_gen->move_and_update(cm); | |
2728 } | |
2729 | |
2730 | |
2731 void PSParallelCompact::follow_stack(ParCompactionManager* cm) { | |
2732 while (!cm->overflow_stack()->is_empty()) { | |
2733 oop obj = cm->overflow_stack()->pop(); | |
2734 obj->follow_contents(cm); | |
2735 } | |
2736 | |
2737 oop obj; | |
2738 // obj is a reference!!! | |
2739 while (cm->marking_stack()->pop_local(obj)) { | |
2740 // It would be nice to assert about the type of objects we might | |
2741 // pop, but they can come from anywhere, unfortunately. | |
2742 obj->follow_contents(cm); | |
2743 } | |
2744 } | |
2745 | |
2746 void | |
2747 PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) { | |
2748 // All klasses on the revisit stack are marked at this point. | |
2749 // Update and follow all subklass, sibling and implementor links. | |
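// The loop bound is ParallelGCThreads+1 because the compaction manager | |
// array includes one extra manager reserved for the VM thread. | |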
2750 for (uint i = 0; i < ParallelGCThreads+1; i++) { | |
2751 ParCompactionManager* cm = ParCompactionManager::manager_array(i); | |
2752 KeepAliveClosure keep_alive_closure(cm); | |
2753 for (int j = 0; j < cm->revisit_klass_stack()->length(); j++) { | |
2754 cm->revisit_klass_stack()->at(j)->follow_weak_klass_links( | |
2755 is_alive_closure(), | |
2756 &keep_alive_closure); | |
2757 } | |
2758 follow_stack(cm); | |
2759 } | |
2760 } | |
2761 | |
2762 void | |
2763 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) { | |
2764 cm->revisit_klass_stack()->push(k); | |
2765 } | |
2766 | |
2767 #ifdef VALIDATE_MARK_SWEEP | |
2768 | |
2769 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) { |
0 | 2770 if (!ValidateMarkSweep) |
2771 return; | |
2772 | |
2773 if (!isroot) { | |
2774 if (_pointer_tracking) { | |
2775 guarantee(_adjusted_pointers->contains(p), "should have seen this pointer"); | |
2776 _adjusted_pointers->remove(p); | |
2777 } | |
2778 } else { | |
2779 ptrdiff_t index = _root_refs_stack->find(p); | |
2780 if (index != -1) { | |
2781 int l = _root_refs_stack->length(); | |
2782 if (l > 0 && l - 1 != index) { | |
2783 void* last = _root_refs_stack->pop(); |
0 | 2784 assert(last != p, "should be different"); |
2785 _root_refs_stack->at_put(index, last); | |
2786 } else { | |
2787 _root_refs_stack->remove(p); | |
2788 } | |
2789 } | |
2790 } | |
2791 } | |
2792 | |
2793 | |
2794 void PSParallelCompact::check_adjust_pointer(void* p) { |
0 | 2795 _adjusted_pointers->push(p); |
2796 } | |
2797 | |
2798 | |
2799 class AdjusterTracker: public OopClosure { | |
2800 public: | |
2801 AdjusterTracker() { } | |
2802 void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); } |
2803 void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); } |
0 | 2804 }; |
2805 | |
2806 | |
2807 void PSParallelCompact::track_interior_pointers(oop obj) { | |
2808 if (ValidateMarkSweep) { | |
2809 _adjusted_pointers->clear(); | |
2810 _pointer_tracking = true; | |
2811 | |
2812 AdjusterTracker checker; | |
2813 obj->oop_iterate(&checker); | |
2814 } | |
2815 } | |
2816 | |
2817 | |
2818 void PSParallelCompact::check_interior_pointers() { | |
2819 if (ValidateMarkSweep) { | |
2820 _pointer_tracking = false; | |
2821 guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers"); | |
2822 } | |
2823 } | |
2824 | |
2825 | |
2826 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) { | |
2827 if (ValidateMarkSweep) { | |
2828 guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops"); | |
2829 _live_oops_index = at_perm ? _live_oops_index_at_perm : 0; | |
2830 } | |
2831 } | |
2832 | |
2833 | |
2834 void PSParallelCompact::register_live_oop(oop p, size_t size) { | |
2835 if (ValidateMarkSweep) { | |
2836 _live_oops->push(p); | |
2837 _live_oops_size->push(size); | |
2838 _live_oops_index++; | |
2839 } | |
2840 } | |
2841 | |
2842 void PSParallelCompact::validate_live_oop(oop p, size_t size) { | |
2843 if (ValidateMarkSweep) { | |
2844 oop obj = _live_oops->at((int)_live_oops_index); | |
2845 guarantee(obj == p, "should be the same object"); | |
2846 guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size"); | |
2847 _live_oops_index++; | |
2848 } | |
2849 } | |
2850 | |
2851 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size, | |
2852 HeapWord* compaction_top) { | |
2853 assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top), | |
2854 "should be moved to forwarded location"); | |
2855 if (ValidateMarkSweep) { | |
2856 PSParallelCompact::validate_live_oop(oop(q), size); | |
2857 _live_oops_moved_to->push(oop(compaction_top)); | |
2858 } | |
2859 if (RecordMarkSweepCompaction) { | |
2860 _cur_gc_live_oops->push(q); | |
2861 _cur_gc_live_oops_moved_to->push(compaction_top); | |
2862 _cur_gc_live_oops_size->push(size); | |
2863 } | |
2864 } | |
2865 | |
2866 | |
2867 void PSParallelCompact::compaction_complete() { | |
2868 if (RecordMarkSweepCompaction) { | |
2869 GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops; | |
2870 GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to; | |
2871 GrowableArray<size_t> * _tmp_live_oops_size = _cur_gc_live_oops_size; | |
2872 | |
2873 _cur_gc_live_oops = _last_gc_live_oops; | |
2874 _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to; | |
2875 _cur_gc_live_oops_size = _last_gc_live_oops_size; | |
2876 _last_gc_live_oops = _tmp_live_oops; | |
2877 _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to; | |
2878 _last_gc_live_oops_size = _tmp_live_oops_size; | |
2879 } | |
2880 } | |
2881 | |
2882 | |
2883 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) { | |
2884 if (!RecordMarkSweepCompaction) { | |
2885 tty->print_cr("Requires RecordMarkSweepCompaction to be enabled"); | |
2886 return; | |
2887 } | |
2888 | |
2889 if (_last_gc_live_oops == NULL) { | |
2890 tty->print_cr("No compaction information gathered yet"); | |
2891 return; | |
2892 } | |
2893 | |
2894 for (int i = 0; i < _last_gc_live_oops->length(); i++) { | |
2895 HeapWord* old_oop = _last_gc_live_oops->at(i); | |
2896 size_t sz = _last_gc_live_oops_size->at(i); | |
2897 if (old_oop <= q && q < (old_oop + sz)) { | |
2898 HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i); | |
2899 size_t offset = (q - old_oop); | |
2900 tty->print_cr("Address " PTR_FORMAT, q); | |
2901 tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT ", at offset " SIZE_FORMAT, old_oop, sz, offset); | |
2902 tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset); | |
2903 return; | |
2904 } | |
2905 } | |
2906 | |
2907 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q); | |
2908 } | |
2909 #endif //VALIDATE_MARK_SWEEP | |
2910 | |
2911 // Update interior oops in the range of chunks [beg_chunk, end_chunk). | |
2912 void | |
2913 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm, | |
2914 SpaceId space_id, | |
2915 size_t beg_chunk, | |
2916 size_t end_chunk) { | |
2917 ParallelCompactData& sd = summary_data(); | |
2918 ParMarkBitMap* const mbm = mark_bitmap(); | |
2919 | |
2920 HeapWord* beg_addr = sd.chunk_to_addr(beg_chunk); | |
2921 HeapWord* const end_addr = sd.chunk_to_addr(end_chunk); | |
2922 assert(beg_chunk <= end_chunk, "bad chunk range"); | |
2923 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix"); | |
2924 | |
2925 #ifdef ASSERT | |
2926 // Claim the chunks to avoid triggering an assert when they are marked as | |
2927 // filled. | |
2928 for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) { | |
2929 assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed"); | |
2930 } | |
2931 #endif // #ifdef ASSERT | |
2932 | |
2933 if (beg_addr != space(space_id)->bottom()) { | |
2934 // Find the first live object or block of dead space that *starts* in this | |
2935 // range of chunks. If a partial object crosses onto the chunk, skip it; it | |
2936 // will be marked for 'deferred update' when the object head is processed. | |
2937 // If dead space crosses onto the chunk, it is also skipped; it will be | |
2938 // filled when the prior chunk is processed. If neither of those apply, the | |
2939 // first word in the chunk is the start of a live object or dead space. | |
2940 assert(beg_addr > space(space_id)->bottom(), "sanity"); | |
2941 const ChunkData* const cp = sd.chunk(beg_chunk); | |
2942 if (cp->partial_obj_size() != 0) { | |
2943 beg_addr = sd.partial_obj_end(beg_chunk); | |
2944 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) { | |
2945 beg_addr = mbm->find_obj_beg(beg_addr, end_addr); | |
2946 } | |
2947 } | |
2948 | |
2949 if (beg_addr < end_addr) { | |
2950 // A live object or block of dead space starts in this range of Chunks. | |
2951 HeapWord* const dense_prefix_end = dense_prefix(space_id); | |
2952 | |
2953 // Create closures and iterate. | |
2954 UpdateOnlyClosure update_closure(mbm, cm, space_id); | |
2955 FillClosure fill_closure(cm, space_id); | |
2956 ParMarkBitMap::IterationStatus status; | |
2957 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr, | |
2958 dense_prefix_end); | |
2959 if (status == ParMarkBitMap::incomplete) { | |
2960 update_closure.do_addr(update_closure.source()); | |
2961 } | |
2962 } | |
2963 | |
2964 // Mark the chunks as filled. | |
2965 ChunkData* const beg_cp = sd.chunk(beg_chunk); | |
2966 ChunkData* const end_cp = sd.chunk(end_chunk); | |
2967 for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) { | |
2968 cp->set_completed(); | |
2969 } | |
2970 } | |
2971 | |
2972 // Return the SpaceId for the space containing addr. If addr is not in the | |
2973 // heap, last_space_id is returned. In debug mode it expects the address to be | |
2974 // in the heap and asserts such. | |
2975 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) { | |
2976 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap"); | |
2977 | |
2978 for (unsigned int id = perm_space_id; id < last_space_id; ++id) { | |
2979 if (_space_info[id].space()->contains(addr)) { | |
2980 return SpaceId(id); | |
2981 } | |
2982 } | |
2983 | |
2984 assert(false, "no space contains the addr"); | |
2985 return last_space_id; | |
2986 } | |
2987 | |
2988 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm, | |
2989 SpaceId id) { | |
2990 assert(id < last_space_id, "bad space id"); | |
2991 | |
2992 ParallelCompactData& sd = summary_data(); | |
2993 const SpaceInfo* const space_info = _space_info + id; | |
2994 ObjectStartArray* const start_array = space_info->start_array(); | |
2995 | |
2996 const MutableSpace* const space = space_info->space(); | |
2997 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set"); | |
2998 HeapWord* const beg_addr = space_info->dense_prefix(); | |
2999 HeapWord* const end_addr = sd.chunk_align_up(space_info->new_top()); | |
3000 | |
3001 const ChunkData* const beg_chunk = sd.addr_to_chunk_ptr(beg_addr); | |
3002 const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr); | |
3003 const ChunkData* cur_chunk; | |
3004 for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) { | |
3005 HeapWord* const addr = cur_chunk->deferred_obj_addr(); | |
3006 if (addr != NULL) { | |
3007 if (start_array != NULL) { | |
3008 start_array->allocate_block(addr); | |
3009 } | |
3010 oop(addr)->update_contents(cm); | |
3011 assert(oop(addr)->is_oop_or_null(), "should be an oop now"); | |
3012 } | |
3013 } | |
3014 } | |
3015 | |
3016 // Skip over count live words starting from beg, and return the address of the | |
3017 // next live word. Unless marked, the word corresponding to beg is assumed to | |
3018 // be dead. Callers must either ensure beg does not correspond to the middle of | |
3019 // an object, or account for those live words in some other way. Callers must | |
3020 // also ensure that there are enough live words in the range [beg, end) to skip. | |
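// Illustrative example: if [beg, end) contains live objects of 3 and 5 words | |
// and count is 6, the first object accounts for 3 of the 6 words and the | |
// returned address is 3 words past the start of the second object. | |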
3021 HeapWord* | |
3022 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count) | |
3023 { | |
3024 assert(count > 0, "sanity"); | |
3025 | |
3026 ParMarkBitMap* m = mark_bitmap(); | |
3027 idx_t bits_to_skip = m->words_to_bits(count); | |
3028 idx_t cur_beg = m->addr_to_bit(beg); | |
3029 const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end)); | |
3030 | |
3031 do { | |
3032 cur_beg = m->find_obj_beg(cur_beg, search_end); | |
3033 idx_t cur_end = m->find_obj_end(cur_beg, search_end); | |
3034 const size_t obj_bits = cur_end - cur_beg + 1; | |
3035 if (obj_bits > bits_to_skip) { | |
3036 return m->bit_to_addr(cur_beg + bits_to_skip); | |
3037 } | |
3038 bits_to_skip -= obj_bits; | |
3039 cur_beg = cur_end + 1; | |
3040 } while (bits_to_skip > 0); | |
3041 | |
3042 // Skipping the desired number of words landed just past the end of an object. | |
3043 // Find the start of the next object. | |
3044 cur_beg = m->find_obj_beg(cur_beg, search_end); | |
3045 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip"); | |
3046 return m->bit_to_addr(cur_beg); | |
3047 } | |
3048 | |
3049 HeapWord* | |
3050 PSParallelCompact::first_src_addr(HeapWord* const dest_addr, | |
3051 size_t src_chunk_idx) | |
3052 { | |
3053 ParMarkBitMap* const bitmap = mark_bitmap(); | |
3054 const ParallelCompactData& sd = summary_data(); | |
3055 const size_t ChunkSize = ParallelCompactData::ChunkSize; | |
3056 | |
3057 assert(sd.is_chunk_aligned(dest_addr), "not aligned"); | |
3058 | |
3059 const ChunkData* const src_chunk_ptr = sd.chunk(src_chunk_idx); | |
3060 const size_t partial_obj_size = src_chunk_ptr->partial_obj_size(); | |
3061 HeapWord* const src_chunk_destination = src_chunk_ptr->destination(); | |
3062 | |
3063 assert(dest_addr >= src_chunk_destination, "wrong src chunk"); | |
3064 assert(src_chunk_ptr->data_size() > 0, "src chunk cannot be empty"); | |
3065 | |
3066 HeapWord* const src_chunk_beg = sd.chunk_to_addr(src_chunk_idx); | |
3067 HeapWord* const src_chunk_end = src_chunk_beg + ChunkSize; | |
3068 | |
3069 HeapWord* addr = src_chunk_beg; | |
3070 if (dest_addr == src_chunk_destination) { | |
3071 // Return the first live word in the source chunk. | |
3072 if (partial_obj_size == 0) { | |
3073 addr = bitmap->find_obj_beg(addr, src_chunk_end); | |
3074 assert(addr < src_chunk_end, "no objects start in src chunk"); | |
3075 } | |
3076 return addr; | |
3077 } | |
3078 | |
3079 // Must skip some live data. | |
3080 size_t words_to_skip = dest_addr - src_chunk_destination; | |
3081 assert(src_chunk_ptr->data_size() > words_to_skip, "wrong src chunk"); | |
3082 | |
3083 if (partial_obj_size >= words_to_skip) { | |
3084 // All the live words to skip are part of the partial object. | |
3085 addr += words_to_skip; | |
3086 if (partial_obj_size == words_to_skip) { | |
3087 // Find the first live word past the partial object. | |
3088 addr = bitmap->find_obj_beg(addr, src_chunk_end); | |
3089 assert(addr < src_chunk_end, "wrong src chunk"); | |
3090 } | |
3091 return addr; | |
3092 } | |
3093 | |
3094 // Skip over the partial object (if any). | |
3095 if (partial_obj_size != 0) { | |
3096 words_to_skip -= partial_obj_size; | |
3097 addr += partial_obj_size; | |
3098 } | |
3099 | |
3100 // Skip over live words due to objects that start in the chunk. | |
3101 addr = skip_live_words(addr, src_chunk_end, words_to_skip); | |
3102 assert(addr < src_chunk_end, "wrong src chunk"); | |
3103 return addr; | |
3104 } | |
3105 | |
3106 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm, | |
3107 size_t beg_chunk, | |
3108 HeapWord* end_addr) | |
3109 { | |
3110 ParallelCompactData& sd = summary_data(); | |
3111 ChunkData* const beg = sd.chunk(beg_chunk); | |
3112 HeapWord* const end_addr_aligned_up = sd.chunk_align_up(end_addr); | |
3113 ChunkData* const end = sd.addr_to_chunk_ptr(end_addr_aligned_up); | |
3114 size_t cur_idx = beg_chunk; | |
3115 for (ChunkData* cur = beg; cur < end; ++cur, ++cur_idx) { | |
3116 assert(cur->data_size() > 0, "chunk must have live data"); | |
3117 cur->decrement_destination_count(); | |
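// When its destination count drops to zero, a chunk that still needs to | |
// be filled becomes immediately fillable; claim it and queue it for this | |
// worker to process. | |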
3118 if (cur_idx <= cur->source_chunk() && cur->available() && cur->claim()) { | |
3119 cm->save_for_processing(cur_idx); | |
3120 } | |
3121 } | |
3122 } | |
3123 | |
3124 size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure, | |
3125 SpaceId& src_space_id, | |
3126 HeapWord*& src_space_top, | |
3127 HeapWord* end_addr) | |
3128 { | |
3129 typedef ParallelCompactData::ChunkData ChunkData; | |
3130 | |
3131 ParallelCompactData& sd = PSParallelCompact::summary_data(); | |
3132 const size_t chunk_size = ParallelCompactData::ChunkSize; | |
3133 | |
3134 size_t src_chunk_idx = 0; | |
3135 | |
3136 // Skip empty chunks (if any) up to the top of the space. | |
3137 HeapWord* const src_aligned_up = sd.chunk_align_up(end_addr); | |
3138 ChunkData* src_chunk_ptr = sd.addr_to_chunk_ptr(src_aligned_up); | |
3139 HeapWord* const top_aligned_up = sd.chunk_align_up(src_space_top); | |
3140 const ChunkData* const top_chunk_ptr = sd.addr_to_chunk_ptr(top_aligned_up); | |
3141 while (src_chunk_ptr < top_chunk_ptr && src_chunk_ptr->data_size() == 0) { | |
3142 ++src_chunk_ptr; | |
3143 } | |
3144 | |
3145 if (src_chunk_ptr < top_chunk_ptr) { | |
3146 // The next source chunk is in the current space. Update src_chunk_idx and | |
3147 // the source address to match src_chunk_ptr. | |
3148 src_chunk_idx = sd.chunk(src_chunk_ptr); | |
3149 HeapWord* const src_chunk_addr = sd.chunk_to_addr(src_chunk_idx); | |
3150 if (src_chunk_addr > closure.source()) { | |
3151 closure.set_source(src_chunk_addr); | |
3152 } | |
3153 return src_chunk_idx; | |
3154 } | |
3155 | |
3156 // Switch to a new source space and find the first non-empty chunk. | |
3157 unsigned int space_id = src_space_id + 1; | |
3158 assert(space_id < last_space_id, "not enough spaces"); | |
3159 | |
3160 HeapWord* const destination = closure.destination(); | |
3161 | |
3162 do { | |
3163 MutableSpace* space = _space_info[space_id].space(); | |
3164 HeapWord* const bottom = space->bottom(); | |
3165 const ChunkData* const bottom_cp = sd.addr_to_chunk_ptr(bottom); | |
3166 | |
3167 // Iterate over the spaces that do not compact into themselves. | |
3168 if (bottom_cp->destination() != bottom) { | |
3169 HeapWord* const top_aligned_up = sd.chunk_align_up(space->top()); | |
3170 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up); | |
3171 | |
3172 for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) { | |
3173 if (src_cp->live_obj_size() > 0) { | |
3174 // Found it. | |
3175 assert(src_cp->destination() == destination, | |
3176 "first live obj in the space must match the destination"); | |
3177 assert(src_cp->partial_obj_size() == 0, | |
3178 "a space cannot begin with a partial obj"); | |
3179 | |
3180 src_space_id = SpaceId(space_id); | |
3181 src_space_top = space->top(); | |
3182 const size_t src_chunk_idx = sd.chunk(src_cp); | |
3183 closure.set_source(sd.chunk_to_addr(src_chunk_idx)); | |
3184 return src_chunk_idx; | |
3185 } else { | |
3186 assert(src_cp->data_size() == 0, "sanity"); | |
3187 } | |
3188 } | |
3189 } | |
3190 } while (++space_id < last_space_id); | |
3191 | |
3192 assert(false, "no source chunk was found"); | |
3193 return 0; | |
3194 } | |
3195 | |
3196 void PSParallelCompact::fill_chunk(ParCompactionManager* cm, size_t chunk_idx) | |
3197 { | |
3198 typedef ParMarkBitMap::IterationStatus IterationStatus; | |
3199 const size_t ChunkSize = ParallelCompactData::ChunkSize; | |
3200 ParMarkBitMap* const bitmap = mark_bitmap(); | |
3201 ParallelCompactData& sd = summary_data(); | |
3202 ChunkData* const chunk_ptr = sd.chunk(chunk_idx); | |
3203 | |
3204 // Get the items needed to construct the closure. | |
3205 HeapWord* dest_addr = sd.chunk_to_addr(chunk_idx); | |
3206 SpaceId dest_space_id = space_id(dest_addr); | |
3207 ObjectStartArray* start_array = _space_info[dest_space_id].start_array(); | |
3208 HeapWord* new_top = _space_info[dest_space_id].new_top(); | |
3209 assert(dest_addr < new_top, "sanity"); | |
3210 const size_t words = MIN2(pointer_delta(new_top, dest_addr), ChunkSize); | |
3211 | |
3212 // Get the source chunk and related info. | |
3213 size_t src_chunk_idx = chunk_ptr->source_chunk(); | |
3214 SpaceId src_space_id = space_id(sd.chunk_to_addr(src_chunk_idx)); | |
3215 HeapWord* src_space_top = _space_info[src_space_id].space()->top(); | |
3216 | |
3217 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words); | |
3218 closure.set_source(first_src_addr(dest_addr, src_chunk_idx)); | |
3219 | |
3220 // Adjust src_chunk_idx to prepare for decrementing destination counts (the | |
3221 // destination count is not decremented when a chunk is copied to itself). | |
3222 if (src_chunk_idx == chunk_idx) { | |
3223 src_chunk_idx += 1; | |
3224 } | |
3225 | |
3226 if (bitmap->is_unmarked(closure.source())) { | |
3227 // The first source word is in the middle of an object; copy the remainder | |
3228 // of the object or as much as will fit. The fact that pointer updates were | |
3229 // deferred will be noted when the object header is processed. | |
3230 HeapWord* const old_src_addr = closure.source(); | |
3231 closure.copy_partial_obj(); | |
3232 if (closure.is_full()) { | |
3233 decrement_destination_counts(cm, src_chunk_idx, closure.source()); | |
3234 chunk_ptr->set_deferred_obj_addr(NULL); | |
3235 chunk_ptr->set_completed(); | |
3236 return; | |
3237 } | |
3238 | |
3239 HeapWord* const end_addr = sd.chunk_align_down(closure.source()); | |
3240 if (sd.chunk_align_down(old_src_addr) != end_addr) { | |
3241 // The partial object was copied from more than one source chunk. | |
3242 decrement_destination_counts(cm, src_chunk_idx, end_addr); | |
3243 | |
3244 // Move to the next source chunk, possibly switching spaces as well. All | |
3245 // args except end_addr may be modified. | |
3246 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top, | |
3247 end_addr); | |
3248 } | |
3249 } | |
3250 | |
3251 do { | |
3252 HeapWord* const cur_addr = closure.source(); | |
3253 HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1), | |
3254 src_space_top); | |
3255 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr); | |
3256 | |
3257 if (status == ParMarkBitMap::incomplete) { | |
3258 // The last obj that starts in the source chunk does not end in the chunk. | |
3259 assert(closure.source() < end_addr, "sanity"); | |
3260 HeapWord* const obj_beg = closure.source(); | |
3261 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(), | |
3262 src_space_top); | |
3263 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end); | |
3264 if (obj_end < range_end) { | |
3265 // The end was found; the entire object will fit. | |
3266 status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end)); | |
3267 assert(status != ParMarkBitMap::would_overflow, "sanity"); | |
3268 } else { | |
3269 // The end was not found; the object will not fit. | |
3270 assert(range_end < src_space_top, "obj cannot cross space boundary"); | |
3271 status = ParMarkBitMap::would_overflow; | |
3272 } | |
3273 } | |
3274 | |
3275 if (status == ParMarkBitMap::would_overflow) { | |
3276 // The last object did not fit. Note that interior oop updates were | |
3277 // deferred, then copy enough of the object to fill the chunk. | |
3278 chunk_ptr->set_deferred_obj_addr(closure.destination()); | |
3279 status = closure.copy_until_full(); // copies from closure.source() | |
3280 | |
3281 decrement_destination_counts(cm, src_chunk_idx, closure.source()); | |
3282 chunk_ptr->set_completed(); | |
3283 return; | |
3284 } | |
3285 | |
3286 if (status == ParMarkBitMap::full) { | |
3287 decrement_destination_counts(cm, src_chunk_idx, closure.source()); | |
3288 chunk_ptr->set_deferred_obj_addr(NULL); | |
3289 chunk_ptr->set_completed(); | |
3290 return; | |
3291 } | |
3292 | |
3293 decrement_destination_counts(cm, src_chunk_idx, end_addr); | |
3294 | |
3295 // Move to the next source chunk, possibly switching spaces as well. All | |
3296 // args except end_addr may be modified. | |
3297 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top, | |
3298 end_addr); | |
3299 } while (true); | |
3300 } | |
3301 | |
3302 void | |
3303 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) { | |
3304 const MutableSpace* sp = space(space_id); | |
3305 if (sp->is_empty()) { | |
3306 return; | |
3307 } | |
3308 | |
3309 ParallelCompactData& sd = PSParallelCompact::summary_data(); | |
3310 ParMarkBitMap* const bitmap = mark_bitmap(); | |
3311 HeapWord* const dp_addr = dense_prefix(space_id); | |
3312 HeapWord* beg_addr = sp->bottom(); | |
3313 HeapWord* end_addr = sp->top(); | |
3314 | |
3315 #ifdef ASSERT | |
3316 assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix"); | |
3317 if (cm->should_verify_only()) { | |
3318 VerifyUpdateClosure verify_update(cm, sp); | |
3319 bitmap->iterate(&verify_update, beg_addr, end_addr); | |
3320 return; | |
3321 } | |
3322 | |
3323 if (cm->should_reset_only()) { | |
3324 ResetObjectsClosure reset_objects(cm); | |
3325 bitmap->iterate(&reset_objects, beg_addr, end_addr); | |
3326 return; | |
3327 } | |
3328 #endif | |
3329 | |
3330 const size_t beg_chunk = sd.addr_to_chunk_idx(beg_addr); | |
3331 const size_t dp_chunk = sd.addr_to_chunk_idx(dp_addr); | |
3332 if (beg_chunk < dp_chunk) { | |
3333 update_and_deadwood_in_dense_prefix(cm, space_id, beg_chunk, dp_chunk); | |
3334 } | |
3335 | |
3336 // The destination of the first live object that starts in the chunk is one | |
3337 // past the end of the partial object entering the chunk (if any). | |
3338 HeapWord* const dest_addr = sd.partial_obj_end(dp_chunk); | |
3339 HeapWord* const new_top = _space_info[space_id].new_top(); | |
3340 assert(new_top >= dest_addr, "bad new_top value"); | |
3341 const size_t words = pointer_delta(new_top, dest_addr); | |
3342 | |
3343 if (words > 0) { | |
3344 ObjectStartArray* start_array = _space_info[space_id].start_array(); | |
3345 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words); | |
3346 | |
3347 ParMarkBitMap::IterationStatus status; | |
3348 status = bitmap->iterate(&closure, dest_addr, end_addr); | |
3349 assert(status == ParMarkBitMap::full, "iteration not complete"); | |
3350 assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr, | |
3351 "live objects skipped because closure is full"); | |
3352 } | |
3353 } | |
3354 | |
3355 jlong PSParallelCompact::millis_since_last_gc() { | |
3356 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc; | |
3357 // XXX See note in genCollectedHeap::millis_since_last_gc(). | |
3358 if (ret_val < 0) { | |
3359 NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);) | |
3360 return 0; | |
3361 } | |
3362 return ret_val; | |
3363 } | |
3364 | |
3365 void PSParallelCompact::reset_millis_since_last_gc() { | |
3366 _time_of_last_gc = os::javaTimeMillis(); | |
3367 } | |
3368 | |
3369 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full() | |
3370 { | |
3371 if (source() != destination()) { | |
3372 assert(source() > destination(), "must copy to the left"); | |
3373 Copy::aligned_conjoint_words(source(), destination(), words_remaining()); | |
3374 } | |
3375 update_state(words_remaining()); | |
3376 assert(is_full(), "sanity"); | |
3377 return ParMarkBitMap::full; | |
3378 } | |
3379 | |
3380 void MoveAndUpdateClosure::copy_partial_obj() | |
3381 { | |
3382 size_t words = words_remaining(); | |
3383 | |
3384 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end()); | |
3385 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end); | |
3386 if (end_addr < range_end) { | |
3387 words = bitmap()->obj_size(source(), end_addr); | |
3388 } | |
3389 | |
3390 // This test is necessary; if omitted, the pointer updates to a partial object | |
3391 // that crosses the dense prefix boundary could be overwritten. | |
3392 if (source() != destination()) { | |
3393 assert(source() > destination(), "must copy to the left"); | |
3394 Copy::aligned_conjoint_words(source(), destination(), words); | |
3395 } | |
3396 update_state(words); | |
3397 } | |
3398 | |
3399 ParMarkBitMapClosure::IterationStatus | |
3400 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) { | |
3401 assert(destination() != NULL, "sanity"); | |
3402 assert(bitmap()->obj_size(addr) == words, "bad size"); | |
3403 | |
3404 _source = addr; | |
3405 assert(PSParallelCompact::summary_data().calc_new_pointer(source()) == | |
3406 destination(), "wrong destination"); | |
3407 | |
3408 if (words > words_remaining()) { | |
3409 return ParMarkBitMap::would_overflow; | |
3410 } | |
3411 | |
3412 // The start_array must be updated even if the object is not moving. | |
3413 if (_start_array != NULL) { | |
3414 _start_array->allocate_block(destination()); | |
3415 } | |
3416 | |
3417 if (destination() != source()) { | |
3418 assert(destination() < source(), "must copy to the left"); | |
3419 Copy::aligned_conjoint_words(source(), destination(), words); | |
3420 } | |
3421 | |
3422 oop moved_oop = (oop) destination(); | |
3423 moved_oop->update_contents(compaction_manager()); | |
3424 assert(moved_oop->is_oop_or_null(), "Object should be whole at this point"); | |
3425 | |
3426 update_state(words); | |
3427 assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity"); | |
3428 return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete; | |
3429 } | |
3430 | |
3431 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm, | |
3432 ParCompactionManager* cm, | |
3433 PSParallelCompact::SpaceId space_id) : | |
3434 ParMarkBitMapClosure(mbm, cm), | |
3435 _space_id(space_id), | |
3436 _start_array(PSParallelCompact::start_array(space_id)) | |
3437 { | |
3438 } | |
3439 | |
3440 // Updates the references in the object to their new values. | |
3441 ParMarkBitMapClosure::IterationStatus | |
3442 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) { | |
3443 do_addr(addr); | |
3444 return ParMarkBitMap::incomplete; | |
3445 } | |
3446 | |
3447 BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm, | |
3448 ParCompactionManager* cm, | |
3449 size_t chunk_index) : | |
3450 ParMarkBitMapClosure(mbm, cm), | |
3451 _live_data_left(0), | |
3452 _cur_block(0) { | |
3453 _chunk_start = | |
3454 PSParallelCompact::summary_data().chunk_to_addr(chunk_index); | |
3455 _chunk_end = _chunk_start + ParallelCompactData::ChunkSize; | |
3458 _chunk_index = chunk_index; | |
3459 _cur_block = | |
3460 PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start); | |
3461 } | |
3462 | |
3463 bool BitBlockUpdateClosure::chunk_contains_cur_block() { | |
3464 return ParallelCompactData::chunk_contains_block(_chunk_index, _cur_block); | |
3465 } | |
3466 | |
3467 void BitBlockUpdateClosure::reset_chunk(size_t chunk_index) { | |
3468 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);) | |
3469 ParallelCompactData& sd = PSParallelCompact::summary_data(); | |
3470 _chunk_index = chunk_index; | |
3471 _live_data_left = 0; | |
3472 _chunk_start = sd.chunk_to_addr(chunk_index); | |
3473 _chunk_end = sd.chunk_to_addr(chunk_index) + ParallelCompactData::ChunkSize; | |
3474 | |
3475 // The first block in this chunk | |
3476 size_t first_block = sd.addr_to_block_idx(_chunk_start); | |
3477 size_t partial_live_size = sd.chunk(chunk_index)->partial_obj_size(); | |
3478 | |
3479 // Set the offset to 0. By definition it should have that value | |
3480 // but it may have been written while processing an earlier chunk. | |
3481 if (partial_live_size == 0) { | |
3482 // No live object extends onto the chunk. The first bit | |
3483 // in the bit map for the first chunk must be a start bit. | |
3484 // Although there may not be any marked bits, it is safe | |
3485 // to set it as a start bit. | |
3486 sd.block(first_block)->set_start_bit_offset(0); | |
3487 sd.block(first_block)->set_first_is_start_bit(true); | |
3488 } else if (sd.partial_obj_ends_in_block(first_block)) { | |
3489 sd.block(first_block)->set_end_bit_offset(0); | |
3490 sd.block(first_block)->set_first_is_start_bit(false); | |
3491 } else { | |
3492 // The partial object extends beyond the first block. | |
3493 // There is no object starting in the first block | |
3494 // so the offset and bit parity are not needed. | |
3495 // Set the bit parity anyway so that assertions | |
3496 // work when no bit is found. | |
3497 sd.block(first_block)->set_end_bit_offset(0); | |
3498 sd.block(first_block)->set_first_is_start_bit(false); | |
3499 } | |
3500 _cur_block = first_block; | |
3501 #ifdef ASSERT | |
3502 if (sd.block(first_block)->first_is_start_bit()) { | |
3503 assert(!sd.partial_obj_ends_in_block(first_block), | |
3504 "Partial object cannot end in first block"); | |
3505 } | |
3506 | |
3507 if (PrintGCDetails && Verbose) { | |
3508 if (partial_live_size == 1) { | |
3509 gclog_or_tty->print_cr("first_block " PTR_FORMAT | |
3510 " _offset " PTR_FORMAT | |
3511 " _first_is_start_bit %d", | |
3512 first_block, | |
3513 sd.block(first_block)->raw_offset(), | |
3514 sd.block(first_block)->first_is_start_bit()); | |
3515 } | |
3516 } | |
3517 #endif | |
3518 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(17);) | |
3519 } | |
3520 | |
3521 // This method is called when an object has been found (both beginning | |
3522 // and end of the object) in the range of iteration. This method is | |
3523 // calculating the words of live data to the left of a block. That live | |
3524 // data includes any object starting to the left of the block (i.e., | |
3525 // the live-data-to-the-left of block AAA will include the full size | |
3526 // of any object entering AAA). | |
3527 | |
3528 ParMarkBitMapClosure::IterationStatus | |
3529 BitBlockUpdateClosure::do_addr(HeapWord* addr, size_t words) { | |
3530 // add the size to the block data. | |
3531 HeapWord* obj = addr; | |
3532 ParallelCompactData& sd = PSParallelCompact::summary_data(); | |
3533 | |
3534 assert(bitmap()->obj_size(obj) == words, "bad size"); | |
3535 assert(_chunk_start <= obj, "object is not in chunk"); | |
3536 assert(obj + words <= _chunk_end, "object is not in chunk"); | |
3537 | |
3538 // Update the live data to the left | |
3539 size_t prev_live_data_left = _live_data_left; | |
3540 _live_data_left = _live_data_left + words; | |
3541 | |
3542 // Is this object in the current block? | |
3543 size_t block_of_obj = sd.addr_to_block_idx(obj); | |
3544 size_t block_of_obj_last = sd.addr_to_block_idx(obj + words - 1); | |
3545 HeapWord* block_of_obj_last_addr = sd.block_to_addr(block_of_obj_last); | |
3546 if (_cur_block < block_of_obj) { | |
3547 | |
3548 // | |
3549 // No object crossed the block boundary and this object was found | |
3550 // on the other side of the block boundary. Update the offset for | |
3551 // the new block with the data size that does not include this object. | |
3552 // | |
3553 // The first bit in block_of_obj is a start bit except in the | |
3554 // case where the partial object for the chunk extends into | |
3555 // this block. | |
3556 if (sd.partial_obj_ends_in_block(block_of_obj)) { | |
3557 sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left); | |
3558 } else { | |
3559 sd.block(block_of_obj)->set_start_bit_offset(prev_live_data_left); | |
3560 } | |
3561 | |
3562 // Does this object pass beyond its block? | |
3563 if (block_of_obj < block_of_obj_last) { | |
3564 // Object crosses block boundary. Two blocks need to be updated: | |
3565 // the current block where the object started | |
3566 // the block where the object ends | |
3567 // | |
3568 // The offset for blocks with no objects starting in them | |
3569 // (e.g., blocks between _cur_block and block_of_obj_last) | |
3570 // should not be needed. | |
3571 // Note that block_of_obj_last may be in another chunk. If so, | |
3572 // it should be overwritten later. This is a problem (writing | |
3573 // into a block in a later chunk) for parallel execution. | |
3574 assert(obj < block_of_obj_last_addr, | |
3575 "Object should start in previous block"); | |
3576 | |
3577 // obj is crossing into block_of_obj_last so the first bit | |
3578 // is an end bit. | |
3579 sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left); | |
3580 | |
3581 _cur_block = block_of_obj_last; | |
3582 } else { | |
3583 // _first_is_start_bit has already been set correctly | |
3584 // in the if-then-else above so don't reset it here. | |
3585 _cur_block = block_of_obj; | |
3586 } | |
3587 } else { | |
3588 // The current block only changes if the object extends beyond | |
3589 // the block it starts in. | |
3590 // | |
3591 // The object starts in the current block. | |
3592 // Does this object pass beyond the end of it? | |
3593 if (block_of_obj < block_of_obj_last) { | |
3594 // Object crosses block boundary. | |
3595 // See note above on possible blocks between block_of_obj and | |
3596 // block_of_obj_last | |
3597 assert(obj < block_of_obj_last_addr, | |
3598 "Object should start in previous block"); | |
3599 | |
3600 sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left); | |
3601 | |
3602 _cur_block = block_of_obj_last; | |
3603 } | |
3604 } | |
3605 | |
3606 // Return incomplete if there are more blocks to be done. | |
3607 if (chunk_contains_cur_block()) { | |
3608 return ParMarkBitMap::incomplete; | |
3609 } | |
3610 return ParMarkBitMap::complete; | |
3611 } | |
3612 | |
3613 // Verify the new location using the forwarding pointer | |
3614 // from MarkSweep::mark_sweep_phase2(). Set the mark_word | |
3615 // to the initial value. | |
3616 ParMarkBitMapClosure::IterationStatus | |
3617 PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) { | |
3618 // The second arg (words) is not used. | |
3619 oop obj = (oop) addr; | |
3620 HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer(); | |
3621 HeapWord* new_pointer = summary_data().calc_new_pointer(obj); | |
3622 if (forwarding_ptr == NULL) { | |
3623 // The object is dead or not moving. | |
3624 assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj), | |
3625 "Object liveness is wrong."); | |
3626 return ParMarkBitMap::incomplete; | |
3627 } | |
3628 assert(UseParallelOldGCDensePrefix || | |
3629 (HeapMaximumCompactionInterval > 1) || | |
3630 (MarkSweepAlwaysCompactCount > 1) || | |
3631 (forwarding_ptr == new_pointer), | |
3632 "Calculation of new location is incorrect"); | |
3633 return ParMarkBitMap::incomplete; | |
3634 } | |
3635 | |
3636 // Reset objects modified for debug checking. | |
3637 ParMarkBitMapClosure::IterationStatus | |
3638 PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) { | |
3639 // The second arg (words) is not used. | |
3640 oop obj = (oop) addr; | |
3641 obj->init_mark(); | |
3642 return ParMarkBitMap::incomplete; | |
3643 } | |
3644 | |
3645 // Prepare for compaction. This method is executed once | |
3646 // (i.e., by a single thread) before compaction. | |
3647 // Save the updated location of the intArrayKlassObj for | |
3648 // filling holes in the dense prefix. | |
3649 void PSParallelCompact::compact_prologue() { | |
3650 _updated_int_array_klass_obj = (klassOop) | |
3651 summary_data().calc_new_pointer(Universe::intArrayKlassObj()); | |
3652 } | |
3653 | |
3654 // The initial implementation of this method created a field | |
3655 // _next_compaction_space_id in SpaceInfo and initialized | |
3656 // that field in SpaceInfo::initialize_space_info(). That | |
3657 // required that _next_compaction_space_id be declared a | |
3658 // SpaceId in SpaceInfo and that would have required that | |
3659 // either SpaceId be declared in a separate class or that | |
3660 // it be declared in SpaceInfo. It didn't seem consistent | |
3661 // to declare it in SpaceInfo (didn't really fit logically). | |
3662 // Alternatively, defining a separate class to define SpaceId | |
3663 // seemed excessive. This implementation is simple and localizes | |
3664 // the knowledge. | |
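// The resulting compaction order is old -> eden -> from -> to; the perm | |
// gen is handled separately (its "next" space is last_space_id). | |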
3665 | |
3666 PSParallelCompact::SpaceId | |
3667 PSParallelCompact::next_compaction_space_id(SpaceId id) { | |
3668 assert(id < last_space_id, "id out of range"); | |
3669 switch (id) { | |
3670 case perm_space_id : | |
3671 return last_space_id; | |
3672 case old_space_id : | |
3673 return eden_space_id; | |
3674 case eden_space_id : | |
3675 return from_space_id; | |
3676 case from_space_id : | |
3677 return to_space_id; | |
3678 case to_space_id : | |
3679 return last_space_id; | |
3680 default: | |
3681 assert(false, "Bad space id"); | |
3682 return last_space_id; | |
3683 } | |
3684 } | |
3685 | |
3686 // Here temporarily for debugging | |
3687 #ifdef ASSERT | |
3688 size_t ParallelCompactData::block_idx(BlockData* block) { | |
3689 size_t index = pointer_delta(block, | |
3690 PSParallelCompact::summary_data()._block_data, sizeof(BlockData)); | |
3691 return index; | |
3692 } | |
3693 #endif |