comparison src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children 82db0859acbe c0492d52d55b
1 /*
2 * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 #include "incls/_precompiled.incl"
26 #include "incls/_psParallelCompact.cpp.incl"
27
28 #include <math.h>
29
30 // All sizes are in HeapWords.
31 const size_t ParallelCompactData::Log2ChunkSize = 9; // 512 words
32 const size_t ParallelCompactData::ChunkSize = (size_t)1 << Log2ChunkSize;
33 const size_t ParallelCompactData::ChunkSizeBytes = ChunkSize << LogHeapWordSize;
34 const size_t ParallelCompactData::ChunkSizeOffsetMask = ChunkSize - 1;
35 const size_t ParallelCompactData::ChunkAddrOffsetMask = ChunkSizeBytes - 1;
36 const size_t ParallelCompactData::ChunkAddrMask = ~ChunkAddrOffsetMask;
37
38 // 32-bit: 128 words covers 4 bitmap words
39 // 64-bit: 128 words covers 2 bitmap words
40 const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words
41 const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize;
42 const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1;
43 const size_t ParallelCompactData::BlockMask = ~BlockOffsetMask;
44
45 const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize;
46
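// For illustration only (derived from the constants above, assuming a 64-bit
// VM where LogHeapWordSize == 3):
//   ChunkSize           = 512 words = 4096 bytes
//   ChunkSizeOffsetMask = 0x1ff  (word offset within a chunk)
//   ChunkAddrOffsetMask = 0xfff  (byte offset within a chunk)
//   ChunkAddrMask       = ~0xfff (chunk-aligned part of an address)
//   BlockSize           = 128 words, so BlocksPerChunk = 512 / 128 = 4
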
47 const ParallelCompactData::ChunkData::chunk_sz_t
48 ParallelCompactData::ChunkData::dc_shift = 27;
49
50 const ParallelCompactData::ChunkData::chunk_sz_t
51 ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift;
52
53 const ParallelCompactData::ChunkData::chunk_sz_t
54 ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift;
55
56 const ParallelCompactData::ChunkData::chunk_sz_t
57 ParallelCompactData::ChunkData::los_mask = ~dc_mask;
58
59 const ParallelCompactData::ChunkData::chunk_sz_t
60 ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift;
61
62 const ParallelCompactData::ChunkData::chunk_sz_t
63 ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift;
64
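// A sketch of the resulting encoding (assuming chunk_sz_t is a 32-bit unsigned
// type, as the ~0U initializer suggests; values are illustrative, not
// authoritative): the low 27 bits (los_mask = 0x07ffffff) hold the live-size
// portion, and the bits at and above dc_shift hold the destination count and
// claim state:
//   dc_mask      = 0xf8000000
//   dc_one       = 0x08000000  (destination count of one)
//   dc_claimed   = 0x40000000  (0x8 << 27)
//   dc_completed = 0x60000000  (0xc << 27)
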
65 #ifdef ASSERT
66 short ParallelCompactData::BlockData::_cur_phase = 0;
67 #endif
68
69 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
70 bool PSParallelCompact::_print_phases = false;
71
72 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
73 klassOop PSParallelCompact::_updated_int_array_klass_obj = NULL;
74
75 double PSParallelCompact::_dwl_mean;
76 double PSParallelCompact::_dwl_std_dev;
77 double PSParallelCompact::_dwl_first_term;
78 double PSParallelCompact::_dwl_adjustment;
79 #ifdef ASSERT
80 bool PSParallelCompact::_dwl_initialized = false;
81 #endif // #ifdef ASSERT
82
83 #ifdef VALIDATE_MARK_SWEEP
84 GrowableArray<oop*>* PSParallelCompact::_root_refs_stack = NULL;
85 GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
86 GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
87 GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
88 size_t PSParallelCompact::_live_oops_index = 0;
89 size_t PSParallelCompact::_live_oops_index_at_perm = 0;
90 GrowableArray<oop*>* PSParallelCompact::_other_refs_stack = NULL;
91 GrowableArray<oop*>* PSParallelCompact::_adjusted_pointers = NULL;
92 bool PSParallelCompact::_pointer_tracking = false;
93 bool PSParallelCompact::_root_tracking = true;
94
95 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
96 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
97 GrowableArray<size_t> * PSParallelCompact::_cur_gc_live_oops_size = NULL;
98 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
99 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
100 GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL;
101 #endif
102
103 // XXX beg - verification code; only works while we also mark in object headers
104 static void
105 verify_mark_bitmap(ParMarkBitMap& _mark_bitmap)
106 {
107 ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
108
109 PSPermGen* perm_gen = heap->perm_gen();
110 PSOldGen* old_gen = heap->old_gen();
111 PSYoungGen* young_gen = heap->young_gen();
112
113 MutableSpace* perm_space = perm_gen->object_space();
114 MutableSpace* old_space = old_gen->object_space();
115 MutableSpace* eden_space = young_gen->eden_space();
116 MutableSpace* from_space = young_gen->from_space();
117 MutableSpace* to_space = young_gen->to_space();
118
119 // 'from_space' here is the survivor space at the lower address.
120 if (to_space->bottom() < from_space->bottom()) {
121 from_space = to_space;
122 to_space = young_gen->from_space();
123 }
124
125 HeapWord* boundaries[12];
126 unsigned int bidx = 0;
127 const unsigned int bidx_max = sizeof(boundaries) / sizeof(boundaries[0]);
128
129 boundaries[0] = perm_space->bottom();
130 boundaries[1] = perm_space->top();
131 boundaries[2] = old_space->bottom();
132 boundaries[3] = old_space->top();
133 boundaries[4] = eden_space->bottom();
134 boundaries[5] = eden_space->top();
135 boundaries[6] = from_space->bottom();
136 boundaries[7] = from_space->top();
137 boundaries[8] = to_space->bottom();
138 boundaries[9] = to_space->top();
139 boundaries[10] = to_space->end();
140 boundaries[11] = to_space->end();
141
142 BitMap::idx_t beg_bit = 0;
143 BitMap::idx_t end_bit;
144 BitMap::idx_t tmp_bit;
145 const BitMap::idx_t last_bit = _mark_bitmap.size();
146 do {
147 HeapWord* addr = _mark_bitmap.bit_to_addr(beg_bit);
148 if (_mark_bitmap.is_marked(beg_bit)) {
149 oop obj = (oop)addr;
150 assert(obj->is_gc_marked(), "obj header is not marked");
151 end_bit = _mark_bitmap.find_obj_end(beg_bit, last_bit);
152 const size_t size = _mark_bitmap.obj_size(beg_bit, end_bit);
153 assert(size == (size_t)obj->size(), "end bit wrong?");
154 beg_bit = _mark_bitmap.find_obj_beg(beg_bit + 1, last_bit);
155 assert(beg_bit > end_bit, "bit set in middle of an obj");
156 } else {
157 if (addr >= boundaries[bidx] && addr < boundaries[bidx + 1]) {
158 // a dead object in the current space.
159 oop obj = (oop)addr;
160 end_bit = _mark_bitmap.addr_to_bit(addr + obj->size());
161 assert(!obj->is_gc_marked(), "obj marked in header, not in bitmap");
162 tmp_bit = beg_bit + 1;
163 beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
164 assert(beg_bit == end_bit, "beg bit set in unmarked obj");
165 beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
166 assert(beg_bit == end_bit, "end bit set in unmarked obj");
167 } else if (addr < boundaries[bidx + 2]) {
168 // addr is between top in the current space and bottom in the next.
169 end_bit = beg_bit + pointer_delta(boundaries[bidx + 2], addr);
170 tmp_bit = beg_bit;
171 beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
172 assert(beg_bit == end_bit, "beg bit set above top");
173 beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
174 assert(beg_bit == end_bit, "end bit set above top");
175 bidx += 2;
176 } else if (bidx < bidx_max - 2) {
177 bidx += 2; // ???
178 } else {
179 tmp_bit = beg_bit;
180 beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, last_bit);
181 assert(beg_bit == last_bit, "beg bit set outside heap");
182 beg_bit = _mark_bitmap.find_obj_end(tmp_bit, last_bit);
183 assert(beg_bit == last_bit, "end bit set outside heap");
184 }
185 }
186 } while (beg_bit < last_bit);
187 }
188 // XXX end - verification code; only works while we also mark in object headers
189
190 #ifndef PRODUCT
191 const char* PSParallelCompact::space_names[] = {
192 "perm", "old ", "eden", "from", "to "
193 };
194
195 void PSParallelCompact::print_chunk_ranges()
196 {
197 tty->print_cr("space bottom top end new_top");
198 tty->print_cr("------ ---------- ---------- ---------- ----------");
199
200 for (unsigned int id = 0; id < last_space_id; ++id) {
201 const MutableSpace* space = _space_info[id].space();
202 tty->print_cr("%u %s "
203 SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " "
204 SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ",
205 id, space_names[id],
206 summary_data().addr_to_chunk_idx(space->bottom()),
207 summary_data().addr_to_chunk_idx(space->top()),
208 summary_data().addr_to_chunk_idx(space->end()),
209 summary_data().addr_to_chunk_idx(_space_info[id].new_top()));
210 }
211 }
212
213 void
214 print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
215 {
216 #define CHUNK_IDX_FORMAT SIZE_FORMAT_W("7")
217 #define CHUNK_DATA_FORMAT SIZE_FORMAT_W("5")
218
219 ParallelCompactData& sd = PSParallelCompact::summary_data();
220 size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;
221 tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " "
222 CHUNK_IDX_FORMAT " " PTR_FORMAT " "
223 CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " "
224 CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d",
225 i, c->data_location(), dci, c->destination(),
226 c->partial_obj_size(), c->live_obj_size(),
227 c->data_size(), c->source_chunk(), c->destination_count());
228
229 #undef CHUNK_IDX_FORMAT
230 #undef CHUNK_DATA_FORMAT
231 }
232
233 void
234 print_generic_summary_data(ParallelCompactData& summary_data,
235 HeapWord* const beg_addr,
236 HeapWord* const end_addr)
237 {
238 size_t total_words = 0;
239 size_t i = summary_data.addr_to_chunk_idx(beg_addr);
240 const size_t last = summary_data.addr_to_chunk_idx(end_addr);
241 HeapWord* pdest = 0;
242
243 while (i <= last) {
244 ParallelCompactData::ChunkData* c = summary_data.chunk(i);
245 if (c->data_size() != 0 || c->destination() != pdest) {
246 print_generic_summary_chunk(i, c);
247 total_words += c->data_size();
248 pdest = c->destination();
249 }
250 ++i;
251 }
252
253 tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
254 }
255
256 void
257 print_generic_summary_data(ParallelCompactData& summary_data,
258 SpaceInfo* space_info)
259 {
260 for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
261 const MutableSpace* space = space_info[id].space();
262 print_generic_summary_data(summary_data, space->bottom(),
263 MAX2(space->top(), space_info[id].new_top()));
264 }
265 }
266
267 void
268 print_initial_summary_chunk(size_t i,
269 const ParallelCompactData::ChunkData* c,
270 bool newline = true)
271 {
272 tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " "
273 SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " "
274 SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d",
275 i, c->destination(),
276 c->partial_obj_size(), c->live_obj_size(),
277 c->data_size(), c->source_chunk(), c->destination_count());
278 if (newline) tty->cr();
279 }
280
281 void
282 print_initial_summary_data(ParallelCompactData& summary_data,
283 const MutableSpace* space) {
284 if (space->top() == space->bottom()) {
285 return;
286 }
287
288 const size_t chunk_size = ParallelCompactData::ChunkSize;
289 HeapWord* const top_aligned_up = summary_data.chunk_align_up(space->top());
290 const size_t end_chunk = summary_data.addr_to_chunk_idx(top_aligned_up);
291 const ParallelCompactData::ChunkData* c = summary_data.chunk(end_chunk - 1);
292 HeapWord* end_addr = c->destination() + c->data_size();
293 const size_t live_in_space = pointer_delta(end_addr, space->bottom());
294
295 // Print (and count) the full chunks at the beginning of the space.
296 size_t full_chunk_count = 0;
297 size_t i = summary_data.addr_to_chunk_idx(space->bottom());
298 while (i < end_chunk && summary_data.chunk(i)->data_size() == chunk_size) {
299 print_initial_summary_chunk(i, summary_data.chunk(i));
300 ++full_chunk_count;
301 ++i;
302 }
303
304 size_t live_to_right = live_in_space - full_chunk_count * chunk_size;
305
306 double max_reclaimed_ratio = 0.0;
307 size_t max_reclaimed_ratio_chunk = 0;
308 size_t max_dead_to_right = 0;
309 size_t max_live_to_right = 0;
310
311 // Print the 'reclaimed ratio' for chunks while there is something live in the
312 // chunk or to the right of it. The remaining chunks are empty (and
313 // uninteresting), and computing the ratio will result in division by 0.
314 while (i < end_chunk && live_to_right > 0) {
315 c = summary_data.chunk(i);
316 HeapWord* const chunk_addr = summary_data.chunk_to_addr(i);
317 const size_t used_to_right = pointer_delta(space->top(), chunk_addr);
318 const size_t dead_to_right = used_to_right - live_to_right;
319 const double reclaimed_ratio = double(dead_to_right) / live_to_right;
320
321 if (reclaimed_ratio > max_reclaimed_ratio) {
322 max_reclaimed_ratio = reclaimed_ratio;
323 max_reclaimed_ratio_chunk = i;
324 max_dead_to_right = dead_to_right;
325 max_live_to_right = live_to_right;
326 }
327
328 print_initial_summary_chunk(i, c, false);
329 tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"),
330 reclaimed_ratio, dead_to_right, live_to_right);
331
332 live_to_right -= c->data_size();
333 ++i;
334 }
335
336 // Any remaining chunks are empty. Print one more if there is one.
337 if (i < end_chunk) {
338 print_initial_summary_chunk(i, summary_data.chunk(i));
339 }
340
341 tty->print_cr("max: " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " "
342 "l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f",
343 max_reclaimed_ratio_chunk, max_dead_to_right,
344 max_live_to_right, max_reclaimed_ratio);
345 }
346
347 void
348 print_initial_summary_data(ParallelCompactData& summary_data,
349 SpaceInfo* space_info) {
350 unsigned int id = PSParallelCompact::perm_space_id;
351 const MutableSpace* space;
352 do {
353 space = space_info[id].space();
354 print_initial_summary_data(summary_data, space);
355 } while (++id < PSParallelCompact::eden_space_id);
356
357 do {
358 space = space_info[id].space();
359 print_generic_summary_data(summary_data, space->bottom(), space->top());
360 } while (++id < PSParallelCompact::last_space_id);
361 }
362 #endif // #ifndef PRODUCT
363
364 #ifdef ASSERT
365 size_t add_obj_count;
366 size_t add_obj_size;
367 size_t mark_bitmap_count;
368 size_t mark_bitmap_size;
369 #endif // #ifdef ASSERT
370
371 ParallelCompactData::ParallelCompactData()
372 {
373 _region_start = 0;
374
375 _chunk_vspace = 0;
376 _chunk_data = 0;
377 _chunk_count = 0;
378
379 _block_vspace = 0;
380 _block_data = 0;
381 _block_count = 0;
382 }
383
384 bool ParallelCompactData::initialize(MemRegion covered_region)
385 {
386 _region_start = covered_region.start();
387 const size_t region_size = covered_region.word_size();
388 DEBUG_ONLY(_region_end = _region_start + region_size;)
389
390 assert(chunk_align_down(_region_start) == _region_start,
391 "region start not aligned");
392 assert((region_size & ChunkSizeOffsetMask) == 0,
393 "region size not a multiple of ChunkSize");
394
395 bool result = initialize_chunk_data(region_size);
396
397 // Initialize the block data if it will be used for updating pointers, or if
398 // this is a debug build.
399 if (!UseParallelOldGCChunkPointerCalc || trueInDebug) {
400 result = result && initialize_block_data(region_size);
401 }
402
403 return result;
404 }
405
406 PSVirtualSpace*
407 ParallelCompactData::create_vspace(size_t count, size_t element_size)
408 {
409 const size_t raw_bytes = count * element_size;
410 const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
411 const size_t granularity = os::vm_allocation_granularity();
412 const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
413
414 const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
415 MAX2(page_sz, granularity);
416 ReservedSpace rs(bytes, rs_align, false);
417 os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
418 rs.size());
419 PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
420 if (vspace != 0) {
421 if (vspace->expand_by(bytes)) {
422 return vspace;
423 }
424 delete vspace;
425 }
426
427 return 0;
428 }
429
430 bool ParallelCompactData::initialize_chunk_data(size_t region_size)
431 {
432 const size_t count = (region_size + ChunkSizeOffsetMask) >> Log2ChunkSize;
433 _chunk_vspace = create_vspace(count, sizeof(ChunkData));
434 if (_chunk_vspace != 0) {
435 _chunk_data = (ChunkData*)_chunk_vspace->reserved_low_addr();
436 _chunk_count = count;
437 return true;
438 }
439 return false;
440 }
441
442 bool ParallelCompactData::initialize_block_data(size_t region_size)
443 {
444 const size_t count = (region_size + BlockOffsetMask) >> Log2BlockSize;
445 _block_vspace = create_vspace(count, sizeof(BlockData));
446 if (_block_vspace != 0) {
447 _block_data = (BlockData*)_block_vspace->reserved_low_addr();
448 _block_count = count;
449 return true;
450 }
451 return false;
452 }
453
454 void ParallelCompactData::clear()
455 {
456 if (_block_data) {
457 memset(_block_data, 0, _block_vspace->committed_size());
458 }
459 memset(_chunk_data, 0, _chunk_vspace->committed_size());
460 }
461
462 void ParallelCompactData::clear_range(size_t beg_chunk, size_t end_chunk) {
463 assert(beg_chunk <= _chunk_count, "beg_chunk out of range");
464 assert(end_chunk <= _chunk_count, "end_chunk out of range");
465 assert(ChunkSize % BlockSize == 0, "ChunkSize not a multiple of BlockSize");
466
467 const size_t chunk_cnt = end_chunk - beg_chunk;
468
469 if (_block_data) {
470 const size_t blocks_per_chunk = ChunkSize / BlockSize;
471 const size_t beg_block = beg_chunk * blocks_per_chunk;
472 const size_t block_cnt = chunk_cnt * blocks_per_chunk;
473 memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
474 }
475 memset(_chunk_data + beg_chunk, 0, chunk_cnt * sizeof(ChunkData));
476 }
477
478 HeapWord* ParallelCompactData::partial_obj_end(size_t chunk_idx) const
479 {
480 const ChunkData* cur_cp = chunk(chunk_idx);
481 const ChunkData* const end_cp = chunk(chunk_count() - 1);
482
483 HeapWord* result = chunk_to_addr(chunk_idx);
484 if (cur_cp < end_cp) {
485 do {
486 result += cur_cp->partial_obj_size();
487 } while (cur_cp->partial_obj_size() == ChunkSize && ++cur_cp < end_cp);
488 }
489 return result;
490 }
491
492 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
493 {
494 const size_t obj_ofs = pointer_delta(addr, _region_start);
495 const size_t beg_chunk = obj_ofs >> Log2ChunkSize;
496 const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize;
497
498 DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
499 DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
500
501 if (beg_chunk == end_chunk) {
502 // All in one chunk.
503 _chunk_data[beg_chunk].add_live_obj(len);
504 return;
505 }
506
507 // First chunk.
508 const size_t beg_ofs = chunk_offset(addr);
509 _chunk_data[beg_chunk].add_live_obj(ChunkSize - beg_ofs);
510
511 klassOop klass = ((oop)addr)->klass();
512 // Middle chunks--completely spanned by this object.
513 for (size_t chunk = beg_chunk + 1; chunk < end_chunk; ++chunk) {
514 _chunk_data[chunk].set_partial_obj_size(ChunkSize);
515 _chunk_data[chunk].set_partial_obj_addr(addr);
516 }
517
518 // Last chunk.
519 const size_t end_ofs = chunk_offset(addr + len - 1);
520 _chunk_data[end_chunk].set_partial_obj_size(end_ofs + 1);
521 _chunk_data[end_chunk].set_partial_obj_addr(addr);
522 }
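
// A worked example for add_obj() above (numbers are illustrative only): with
// ChunkSize == 512, an object of len == 1200 words starting at word offset 300
// in its first chunk spans three chunks. The first chunk records
// 512 - 300 == 212 live words, the single middle chunk gets
// partial_obj_size == ChunkSize, and the last chunk gets end_ofs + 1 == 476
// words (212 + 512 + 476 == 1200).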
523
524 void
525 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
526 {
527 assert(chunk_offset(beg) == 0, "not ChunkSize aligned");
528 assert(chunk_offset(end) == 0, "not ChunkSize aligned");
529
530 size_t cur_chunk = addr_to_chunk_idx(beg);
531 const size_t end_chunk = addr_to_chunk_idx(end);
532 HeapWord* addr = beg;
533 while (cur_chunk < end_chunk) {
534 _chunk_data[cur_chunk].set_destination(addr);
535 _chunk_data[cur_chunk].set_destination_count(0);
536 _chunk_data[cur_chunk].set_source_chunk(cur_chunk);
537 _chunk_data[cur_chunk].set_data_location(addr);
538
539 // Update live_obj_size so the chunk appears completely full.
540 size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size();
541 _chunk_data[cur_chunk].set_live_obj_size(live_size);
542
543 ++cur_chunk;
544 addr += ChunkSize;
545 }
546 }
547
548 bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
549 HeapWord* source_beg, HeapWord* source_end,
550 HeapWord** target_next,
551 HeapWord** source_next) {
552 // This is too strict.
553 // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned");
554
555 if (TraceParallelOldGCSummaryPhase) {
556 tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
557 "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
558 "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
559 target_beg, target_end,
560 source_beg, source_end,
561 target_next != 0 ? *target_next : (HeapWord*) 0,
562 source_next != 0 ? *source_next : (HeapWord*) 0);
563 }
564
565 size_t cur_chunk = addr_to_chunk_idx(source_beg);
566 const size_t end_chunk = addr_to_chunk_idx(chunk_align_up(source_end));
567
568 HeapWord *dest_addr = target_beg;
569 while (cur_chunk < end_chunk) {
570 size_t words = _chunk_data[cur_chunk].data_size();
571
572 #if 1
573 assert(pointer_delta(target_end, dest_addr) >= words,
574 "source region does not fit into target region");
575 #else
576 // XXX - need some work on the corner cases here. If the chunk does not
577 // fit, then we must either make sure any partial_obj from the chunk fits, or
578 // 'undo' the initial part of the partial_obj that is in the previous chunk.
579 if (dest_addr + words >= target_end) {
580 // Let the caller know where to continue.
581 *target_next = dest_addr;
582 *source_next = chunk_to_addr(cur_chunk);
583 return false;
584 }
585 #endif // #if 1
586
587 _chunk_data[cur_chunk].set_destination(dest_addr);
588
589 // Set the destination_count for cur_chunk, and if necessary, update
590 // source_chunk for a destination chunk. The source_chunk field is updated
591 // if cur_chunk is the first (left-most) chunk to be copied to a destination
592 // chunk.
593 //
594 // The destination_count calculation is a bit subtle. A chunk that has data
595 // that compacts into itself does not count itself as a destination. This
596 // maintains the invariant that a zero count means the chunk is available
597 // and can be claimed and then filled.
598 if (words > 0) {
599 HeapWord* const last_addr = dest_addr + words - 1;
600 const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr);
601 const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr);
602 #if 0
603 // Initially assume that the destination chunks will be the same and
604 // adjust the value below if necessary. Under this assumption, if
605 // cur_chunk == dest_chunk_2, then cur_chunk will be compacted completely
606 // into itself.
607 uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1;
608 if (dest_chunk_1 != dest_chunk_2) {
609 // Destination chunks differ; adjust destination_count.
610 destination_count += 1;
611 // Data from cur_chunk will be copied to the start of dest_chunk_2.
612 _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
613 } else if (chunk_offset(dest_addr) == 0) {
614 // Data from cur_chunk will be copied to the start of the destination
615 // chunk.
616 _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
617 }
618 #else
619 // Initially assume that the destination chunks will be different and
620 // adjust the value below if necessary. Under this assumption, if
621 // cur_chunk == dest_chunk2, then cur_chunk will be compacted partially
622 // into dest_chunk_1 and partially into itself.
623 uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2;
624 if (dest_chunk_1 != dest_chunk_2) {
625 // Data from cur_chunk will be copied to the start of dest_chunk_2.
626 _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
627 } else {
628 // Destination chunks are the same; adjust destination_count.
629 destination_count -= 1;
630 if (chunk_offset(dest_addr) == 0) {
631 // Data from cur_chunk will be copied to the start of the destination
632 // chunk.
633 _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
634 }
635 }
636 #endif // #if 0
637
638 _chunk_data[cur_chunk].set_destination_count(destination_count);
639 _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk));
640 dest_addr += words;
641 }
642
643 ++cur_chunk;
644 }
645
646 *target_next = dest_addr;
647 return true;
648 }
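
// Illustrative outcomes of the destination_count logic in summarize() above
// (not taken from the original sources): a chunk whose live data compacts
// entirely into itself ends with a count of 0, so it can be claimed and filled
// immediately; a chunk whose data is split across two destination chunks,
// neither of which is itself, gets a count of 2; a chunk whose data is copied
// partly into an earlier destination chunk and partly into itself gets 1.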
649
650 bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) {
651 HeapWord* block_addr = block_to_addr(block_index);
652 HeapWord* block_end_addr = block_addr + BlockSize;
653 size_t chunk_index = addr_to_chunk_idx(block_addr);
654 HeapWord* partial_obj_end_addr = partial_obj_end(chunk_index);
655
656 // An object that ends at the end of the block ends
657 // in the block (the last word of the object is to
658 // the left of the end).
659 if ((block_addr < partial_obj_end_addr) &&
660 (partial_obj_end_addr <= block_end_addr)) {
661 return true;
662 }
663
664 return false;
665 }
666
667 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
668 HeapWord* result = NULL;
669 if (UseParallelOldGCChunkPointerCalc) {
670 result = chunk_calc_new_pointer(addr);
671 } else {
672 result = block_calc_new_pointer(addr);
673 }
674 return result;
675 }
676
677 // This method is overly complicated (and expensive) to be called
678 // for every reference.
679 // Try to restructure this so that a NULL is returned if
680 // the object is dead. But don't waste the cycles to explicitly check
681 // that it is dead since only live objects should be passed in.
682
683 HeapWord* ParallelCompactData::chunk_calc_new_pointer(HeapWord* addr) {
684 assert(addr != NULL, "Should detect NULL oop earlier");
685 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
686 #ifdef ASSERT
687 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
688 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
689 }
690 #endif
691 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
692
693 // Chunk covering the object.
694 size_t chunk_index = addr_to_chunk_idx(addr);
695 const ChunkData* const chunk_ptr = chunk(chunk_index);
696 HeapWord* const chunk_addr = chunk_align_down(addr);
697
698 assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
699 assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");
700
701 HeapWord* result = chunk_ptr->destination();
702
703 // If all the data in the chunk is live, then the new location of the object
704 // can be calculated from the destination of the chunk plus the offset of the
705 // object in the chunk.
706 if (chunk_ptr->data_size() == ChunkSize) {
707 result += pointer_delta(addr, chunk_addr);
708 return result;
709 }
710
711 // The new location of the object is
712 // chunk destination +
713 // size of the partial object extending onto the chunk +
714 // sizes of the live objects in the Chunk that are to the left of addr
715 const size_t partial_obj_size = chunk_ptr->partial_obj_size();
716 HeapWord* const search_start = chunk_addr + partial_obj_size;
717
718 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
719 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
720
721 result += partial_obj_size + live_to_left;
722 assert(result <= addr, "object cannot move to the right");
723 return result;
724 }
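
// A worked example for chunk_calc_new_pointer() above (illustrative numbers
// only): if the chunk's destination is D, its partial_obj_size is 40 words,
// and the bitmap finds 100 live words between chunk_addr + 40 and addr, the
// object's new address is D + 40 + 100.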
725
726 HeapWord* ParallelCompactData::block_calc_new_pointer(HeapWord* addr) {
727 assert(addr != NULL, "Should detect NULL oop earlier");
728 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
729 #ifdef ASSERT
730 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
731 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
732 }
733 #endif
734 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
735
736 // Chunk covering the object.
737 size_t chunk_index = addr_to_chunk_idx(addr);
738 const ChunkData* const chunk_ptr = chunk(chunk_index);
739 HeapWord* const chunk_addr = chunk_align_down(addr);
740
741 assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
742 assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");
743
744 HeapWord* result = chunk_ptr->destination();
745
746 // If all the data in the chunk is live, then the new location of the object
747 // can be calculated from the destination of the chunk plus the offset of the
748 // object in the chunk.
749 if (chunk_ptr->data_size() == ChunkSize) {
750 result += pointer_delta(addr, chunk_addr);
751 return result;
752 }
753
754 // The new location of the object is
755 // chunk destination +
756 // block offset +
757 // sizes of the live objects in the Block that are to the left of addr
758 const size_t block_offset = addr_to_block_ptr(addr)->offset();
759 HeapWord* const search_start = chunk_addr + block_offset;
760
761 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
762 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
763
764 result += block_offset + live_to_left;
765 assert(result <= addr, "object cannot move to the right");
766 assert(result == chunk_calc_new_pointer(addr), "Should match");
767 return result;
768 }
769
770 klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
771 klassOop updated_klass;
772 if (PSParallelCompact::should_update_klass(old_klass)) {
773 updated_klass = (klassOop) calc_new_pointer(old_klass);
774 } else {
775 updated_klass = old_klass;
776 }
777
778 return updated_klass;
779 }
780
781 #ifdef ASSERT
782 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
783 {
784 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
785 const size_t* const end = (const size_t*)vspace->committed_high_addr();
786 for (const size_t* p = beg; p < end; ++p) {
787 assert(*p == 0, "not zero");
788 }
789 }
790
791 void ParallelCompactData::verify_clear()
792 {
793 verify_clear(_chunk_vspace);
794 verify_clear(_block_vspace);
795 }
796 #endif // #ifdef ASSERT
797
798 #ifdef NOT_PRODUCT
799 ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) {
800 ParallelCompactData& sd = PSParallelCompact::summary_data();
801 return sd.chunk(chunk_index);
802 }
803 #endif
804
805 elapsedTimer PSParallelCompact::_accumulated_time;
806 unsigned int PSParallelCompact::_total_invocations = 0;
807 unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
808 jlong PSParallelCompact::_time_of_last_gc = 0;
809 CollectorCounters* PSParallelCompact::_counters = NULL;
810 ParMarkBitMap PSParallelCompact::_mark_bitmap;
811 ParallelCompactData PSParallelCompact::_summary_data;
812
813 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
814 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
815 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
816
817 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) {
818 #ifdef VALIDATE_MARK_SWEEP
819 if (ValidateMarkSweep) {
820 if (!Universe::heap()->is_in_reserved(p)) {
821 _root_refs_stack->push(p);
822 } else {
823 _other_refs_stack->push(p);
824 }
825 }
826 #endif
827 mark_and_push(_compaction_manager, p);
828 }
829
830 void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
831 oop* p) {
832 assert(Universe::heap()->is_in_reserved(p),
833 "we should only be traversing objects here");
834 oop m = *p;
835 if (m != NULL && mark_bitmap()->is_unmarked(m)) {
836 if (mark_obj(m)) {
837 m->follow_contents(cm); // Follow contents of the marked object
838 }
839 }
840 }
841
842 // Anything associated with this variable is temporary.
843
844 void PSParallelCompact::mark_and_push_internal(ParCompactionManager* cm,
845 oop* p) {
846 // Push marked object, contents will be followed later
847 oop m = *p;
848 if (mark_obj(m)) {
849 // This thread marked the object and
850 // owns the subsequent processing of it.
851 cm->save_for_scanning(m);
852 }
853 }
854
855 void PSParallelCompact::post_initialize() {
856 ParallelScavengeHeap* heap = gc_heap();
857 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
858
859 MemRegion mr = heap->reserved_region();
860 _ref_processor = ReferenceProcessor::create_ref_processor(
861 mr, // span
862 true, // atomic_discovery
863 true, // mt_discovery
864 &_is_alive_closure,
865 ParallelGCThreads,
866 ParallelRefProcEnabled);
867 _counters = new CollectorCounters("PSParallelCompact", 1);
868
869 // Initialize static fields in ParCompactionManager.
870 ParCompactionManager::initialize(mark_bitmap());
871 }
872
873 bool PSParallelCompact::initialize() {
874 ParallelScavengeHeap* heap = gc_heap();
875 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
876 MemRegion mr = heap->reserved_region();
877
878 // Was the old gen allocated successfully?
879 if (!heap->old_gen()->is_allocated()) {
880 return false;
881 }
882
883 initialize_space_info();
884 initialize_dead_wood_limiter();
885
886 if (!_mark_bitmap.initialize(mr)) {
887 vm_shutdown_during_initialization("Unable to allocate bit map for "
888 "parallel garbage collection for the requested heap size.");
889 return false;
890 }
891
892 if (!_summary_data.initialize(mr)) {
893 vm_shutdown_during_initialization("Unable to allocate tables for "
894 "parallel garbage collection for the requested heap size.");
895 return false;
896 }
897
898 return true;
899 }
900
901 void PSParallelCompact::initialize_space_info()
902 {
903 memset(&_space_info, 0, sizeof(_space_info));
904
905 ParallelScavengeHeap* heap = gc_heap();
906 PSYoungGen* young_gen = heap->young_gen();
907 MutableSpace* perm_space = heap->perm_gen()->object_space();
908
909 _space_info[perm_space_id].set_space(perm_space);
910 _space_info[old_space_id].set_space(heap->old_gen()->object_space());
911 _space_info[eden_space_id].set_space(young_gen->eden_space());
912 _space_info[from_space_id].set_space(young_gen->from_space());
913 _space_info[to_space_id].set_space(young_gen->to_space());
914
915 _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
916 _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
917
918 _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
919 if (TraceParallelOldGCDensePrefix) {
920 tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
921 _space_info[perm_space_id].min_dense_prefix());
922 }
923 }
924
925 void PSParallelCompact::initialize_dead_wood_limiter()
926 {
927 const size_t max = 100;
928 _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
929 _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
930 _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
931 DEBUG_ONLY(_dwl_initialized = true;)
932 _dwl_adjustment = normal_distribution(1.0);
933 }
934
935 // Simple class for storing info about the heap at the start of GC, to be used
936 // after GC for comparison/printing.
937 class PreGCValues {
938 public:
939 PreGCValues() { }
940 PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }
941
942 void fill(ParallelScavengeHeap* heap) {
943 _heap_used = heap->used();
944 _young_gen_used = heap->young_gen()->used_in_bytes();
945 _old_gen_used = heap->old_gen()->used_in_bytes();
946 _perm_gen_used = heap->perm_gen()->used_in_bytes();
947 };
948
949 size_t heap_used() const { return _heap_used; }
950 size_t young_gen_used() const { return _young_gen_used; }
951 size_t old_gen_used() const { return _old_gen_used; }
952 size_t perm_gen_used() const { return _perm_gen_used; }
953
954 private:
955 size_t _heap_used;
956 size_t _young_gen_used;
957 size_t _old_gen_used;
958 size_t _perm_gen_used;
959 };
960
961 void
962 PSParallelCompact::clear_data_covering_space(SpaceId id)
963 {
964 // At this point, top is the value before GC, new_top() is the value that will
965 // be set at the end of GC. The marking bitmap is cleared to top; nothing
966 // should be marked above top. The summary data is cleared to the larger of
967 // top & new_top.
968 MutableSpace* const space = _space_info[id].space();
969 HeapWord* const bot = space->bottom();
970 HeapWord* const top = space->top();
971 HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
972
973 const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
974 const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
975 _mark_bitmap.clear_range(beg_bit, end_bit);
976
977 const size_t beg_chunk = _summary_data.addr_to_chunk_idx(bot);
978 const size_t end_chunk =
979 _summary_data.addr_to_chunk_idx(_summary_data.chunk_align_up(max_top));
980 _summary_data.clear_range(beg_chunk, end_chunk);
981 }
982
983 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
984 {
985 // Update the from & to space pointers in space_info, since they are swapped
986 // at each young gen gc. Do the update unconditionally (even though a
987 // promotion failure does not swap spaces) because an unknown number of minor
988 // collections will have swapped the spaces an unknown number of times.
989 TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
990 ParallelScavengeHeap* heap = gc_heap();
991 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
992 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
993
994 pre_gc_values->fill(heap);
995
996 ParCompactionManager::reset();
997 NOT_PRODUCT(_mark_bitmap.reset_counters());
998 DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
999 DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
1000
1001 // Increment the invocation count
1002 heap->increment_total_collections();
1003
1004 // We need to track unique mark sweep invocations as well.
1005 _total_invocations++;
1006
1007 if (PrintHeapAtGC) {
1008 Universe::print_heap_before_gc();
1009 }
1010
1011 // Fill in TLABs
1012 heap->accumulate_statistics_all_tlabs();
1013 heap->ensure_parsability(true); // retire TLABs
1014
1015 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
1016 HandleMark hm; // Discard invalid handles created during verification
1017 gclog_or_tty->print(" VerifyBeforeGC:");
1018 Universe::verify(true);
1019 }
1020
1021 // Verify object start arrays
1022 if (VerifyObjectStartArray &&
1023 VerifyBeforeGC) {
1024 heap->old_gen()->verify_object_start_array();
1025 heap->perm_gen()->verify_object_start_array();
1026 }
1027
1028 DEBUG_ONLY(mark_bitmap()->verify_clear();)
1029 DEBUG_ONLY(summary_data().verify_clear();)
1030 }
1031
1032 void PSParallelCompact::post_compact()
1033 {
1034 TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
1035
1036 // Clear the marking bitmap and summary data and update top() in each space.
1037 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
1038 clear_data_covering_space(SpaceId(id));
1039 _space_info[id].space()->set_top(_space_info[id].new_top());
1040 }
1041
1042 MutableSpace* const eden_space = _space_info[eden_space_id].space();
1043 MutableSpace* const from_space = _space_info[from_space_id].space();
1044 MutableSpace* const to_space = _space_info[to_space_id].space();
1045
1046 ParallelScavengeHeap* heap = gc_heap();
1047 bool eden_empty = eden_space->is_empty();
1048 if (!eden_empty) {
1049 eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1050 heap->young_gen(), heap->old_gen());
1051 }
1052
1053 // Update heap occupancy information which is used as input to the soft ref
1054 // clearing policy at the next gc.
1055 Universe::update_heap_info_at_gc();
1056
1057 bool young_gen_empty = eden_empty && from_space->is_empty() &&
1058 to_space->is_empty();
1059
1060 BarrierSet* bs = heap->barrier_set();
1061 if (bs->is_a(BarrierSet::ModRef)) {
1062 ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
1063 MemRegion old_mr = heap->old_gen()->reserved();
1064 MemRegion perm_mr = heap->perm_gen()->reserved();
1065 assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
1066
1067 if (young_gen_empty) {
1068 modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
1069 } else {
1070 modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
1071 }
1072 }
1073
1074 Threads::gc_epilogue();
1075 CodeCache::gc_epilogue();
1076
1077 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1078
1079 ref_processor()->enqueue_discovered_references(NULL);
1080
1081 // Update time of last GC
1082 reset_millis_since_last_gc();
1083 }
1084
1085 HeapWord*
1086 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1087 bool maximum_compaction)
1088 {
1089 const size_t chunk_size = ParallelCompactData::ChunkSize;
1090 const ParallelCompactData& sd = summary_data();
1091
1092 const MutableSpace* const space = _space_info[id].space();
1093 HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
1094 const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(space->bottom());
1095 const ChunkData* const end_cp = sd.addr_to_chunk_ptr(top_aligned_up);
1096
1097 // Skip full chunks at the beginning of the space--they are necessarily part
1098 // of the dense prefix.
1099 size_t full_count = 0;
1100 const ChunkData* cp;
1101 for (cp = beg_cp; cp < end_cp && cp->data_size() == chunk_size; ++cp) {
1102 ++full_count;
1103 }
1104
1105 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1106 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1107 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
1108 if (maximum_compaction || cp == end_cp || interval_ended) {
1109 _maximum_compaction_gc_num = total_invocations();
1110 return sd.chunk_to_addr(cp);
1111 }
1112
1113 HeapWord* const new_top = _space_info[id].new_top();
1114 const size_t space_live = pointer_delta(new_top, space->bottom());
1115 const size_t space_used = space->used_in_words();
1116 const size_t space_capacity = space->capacity_in_words();
1117
1118 const double cur_density = double(space_live) / space_capacity;
1119 const double deadwood_density =
1120 (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
1121 const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
1122
1123 if (TraceParallelOldGCDensePrefix) {
1124 tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
1125 cur_density, deadwood_density, deadwood_goal);
1126 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1127 "space_cap=" SIZE_FORMAT,
1128 space_live, space_used,
1129 space_capacity);
1130 }
1131
1132 // XXX - Use binary search?
1133 HeapWord* dense_prefix = sd.chunk_to_addr(cp);
1134 const ChunkData* full_cp = cp;
1135 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(space->top() - 1);
1136 while (cp < end_cp) {
1137 HeapWord* chunk_destination = cp->destination();
1138 const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
1139 if (TraceParallelOldGCDensePrefix && Verbose) {
1140 tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " "
1141 "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"),
1142 sd.chunk(cp), chunk_destination,
1143 dense_prefix, cur_deadwood);
1144 }
1145
1146 if (cur_deadwood >= deadwood_goal) {
1147 // Found the chunk that has the correct amount of deadwood to the left.
1148 // This typically occurs after crossing a fairly sparse set of chunks, so
1149 // iterate backwards over those sparse chunks, looking for the chunk that
1150 // has the lowest density of live objects 'to the right.'
1151 size_t space_to_left = sd.chunk(cp) * chunk_size;
1152 size_t live_to_left = space_to_left - cur_deadwood;
1153 size_t space_to_right = space_capacity - space_to_left;
1154 size_t live_to_right = space_live - live_to_left;
1155 double density_to_right = double(live_to_right) / space_to_right;
1156 while (cp > full_cp) {
1157 --cp;
1158 const size_t prev_chunk_live_to_right = live_to_right - cp->data_size();
1159 const size_t prev_chunk_space_to_right = space_to_right + chunk_size;
1160 double prev_chunk_density_to_right =
1161 double(prev_chunk_live_to_right) / prev_chunk_space_to_right;
1162 if (density_to_right <= prev_chunk_density_to_right) {
1163 return dense_prefix;
1164 }
1165 if (TraceParallelOldGCDensePrefix && Verbose) {
1166 tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f "
1167 "pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
1168 prev_chunk_density_to_right);
1169 }
1170 dense_prefix -= chunk_size;
1171 live_to_right = prev_chunk_live_to_right;
1172 space_to_right = prev_chunk_space_to_right;
1173 density_to_right = prev_chunk_density_to_right;
1174 }
1175 return dense_prefix;
1176 }
1177
1178 dense_prefix += chunk_size;
1179 ++cp;
1180 }
1181
1182 return dense_prefix;
1183 }
1184
1185 #ifndef PRODUCT
1186 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
1187 const SpaceId id,
1188 const bool maximum_compaction,
1189 HeapWord* const addr)
1190 {
1191 const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr);
1192 ChunkData* const cp = summary_data().chunk(chunk_idx);
1193 const MutableSpace* const space = _space_info[id].space();
1194 HeapWord* const new_top = _space_info[id].new_top();
1195
1196 const size_t space_live = pointer_delta(new_top, space->bottom());
1197 const size_t dead_to_left = pointer_delta(addr, cp->destination());
1198 const size_t space_cap = space->capacity_in_words();
1199 const double dead_to_left_pct = double(dead_to_left) / space_cap;
1200 const size_t live_to_right = new_top - cp->destination();
1201 const size_t dead_to_right = space->top() - addr - live_to_right;
1202
1203 tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " "
1204 "spl=" SIZE_FORMAT " "
1205 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
1206 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
1207 " ratio=%10.8f",
1208 algorithm, addr, chunk_idx,
1209 space_live,
1210 dead_to_left, dead_to_left_pct,
1211 dead_to_right, live_to_right,
1212 double(dead_to_right) / live_to_right);
1213 }
1214 #endif // #ifndef PRODUCT
1215
1216 // Return a fraction indicating how much of the generation can be treated as
1217 // "dead wood" (i.e., not reclaimed). The function uses a normal distribution
1218 // based on the density of live objects in the generation to determine a limit,
1219 // which is then adjusted so the return value is min_percent when the density is
1220 // 1.
1221 //
1222 // The following table shows some return values for different values of the
1223 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
1224 // min_percent is 1.
1225 //
1226 // fraction allowed as dead wood
1227 // -----------------------------------------------------------------
1228 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
1229 // ------- ---------- ---------- ---------- ---------- ---------- ----------
1230 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1231 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1232 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1233 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1234 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1235 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1236 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1237 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1238 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1239 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1240 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
1241 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1242 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1243 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1244 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1245 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1246 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1247 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1248 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1249 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1250 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1251
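// A sketch of the underlying math (normal_distribution() is defined elsewhere;
// the Gaussian form below is inferred from _dwl_first_term in
// initialize_dead_wood_limiter() and is not authoritative):
//
//   f(x)  = _dwl_first_term * exp(-(x - _dwl_mean)^2 / (2 * _dwl_std_dev^2))
//   limit = MAX2(f(density) - f(1.0) + min_percent / 100.0, 0.0)
//
// Since the table above uses a mean of 0.5 and min_percent of 1, f(density)
// peaks at density == 0.5 and the f(1.0) adjustment pins the result to 0.01
// when the space is completely full.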
1252 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
1253 {
1254 assert(_dwl_initialized, "uninitialized");
1255
1256 // The raw limit is the value of the normal distribution at x = density.
1257 const double raw_limit = normal_distribution(density);
1258
1259 // Adjust the raw limit so it becomes the minimum when the density is 1.
1260 //
1261 // First subtract the adjustment value (which is simply the precomputed value
1262 // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
1263 // Then add the minimum value, so the minimum is returned when the density is
1264 // 1. Finally, prevent negative values, which occur when the mean is not 0.5.
1265 const double min = double(min_percent) / 100.0;
1266 const double limit = raw_limit - _dwl_adjustment + min;
1267 return MAX2(limit, 0.0);
1268 }
1269
1270 ParallelCompactData::ChunkData*
1271 PSParallelCompact::first_dead_space_chunk(const ChunkData* beg,
1272 const ChunkData* end)
1273 {
1274 const size_t chunk_size = ParallelCompactData::ChunkSize;
1275 ParallelCompactData& sd = summary_data();
1276 size_t left = sd.chunk(beg);
1277 size_t right = end > beg ? sd.chunk(end) - 1 : left;
1278
1279 // Binary search.
1280 while (left < right) {
1281 // Equivalent to (left + right) / 2, but does not overflow.
1282 const size_t middle = left + (right - left) / 2;
1283 ChunkData* const middle_ptr = sd.chunk(middle);
1284 HeapWord* const dest = middle_ptr->destination();
1285 HeapWord* const addr = sd.chunk_to_addr(middle);
1286 assert(dest != NULL, "sanity");
1287 assert(dest <= addr, "must move left");
1288
1289 if (middle > left && dest < addr) {
1290 right = middle - 1;
1291 } else if (middle < right && middle_ptr->data_size() == chunk_size) {
1292 left = middle + 1;
1293 } else {
1294 return middle_ptr;
1295 }
1296 }
1297 return sd.chunk(left);
1298 }
1299
1300 ParallelCompactData::ChunkData*
1301 PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg,
1302 const ChunkData* end,
1303 size_t dead_words)
1304 {
1305 ParallelCompactData& sd = summary_data();
1306 size_t left = sd.chunk(beg);
1307 size_t right = end > beg ? sd.chunk(end) - 1 : left;
1308
1309 // Binary search.
1310 while (left < right) {
1311 // Equivalent to (left + right) / 2, but does not overflow.
1312 const size_t middle = left + (right - left) / 2;
1313 ChunkData* const middle_ptr = sd.chunk(middle);
1314 HeapWord* const dest = middle_ptr->destination();
1315 HeapWord* const addr = sd.chunk_to_addr(middle);
1316 assert(dest != NULL, "sanity");
1317 assert(dest <= addr, "must move left");
1318
1319 const size_t dead_to_left = pointer_delta(addr, dest);
1320 if (middle > left && dead_to_left > dead_words) {
1321 right = middle - 1;
1322 } else if (middle < right && dead_to_left < dead_words) {
1323 left = middle + 1;
1324 } else {
1325 return middle_ptr;
1326 }
1327 }
1328 return sd.chunk(left);
1329 }
1330
1331 // The result is valid during the summary phase, after the initial summarization
1332 // of each space into itself, and before final summarization.
1333 inline double
1334 PSParallelCompact::reclaimed_ratio(const ChunkData* const cp,
1335 HeapWord* const bottom,
1336 HeapWord* const top,
1337 HeapWord* const new_top)
1338 {
1339 ParallelCompactData& sd = summary_data();
1340
1341 assert(cp != NULL, "sanity");
1342 assert(bottom != NULL, "sanity");
1343 assert(top != NULL, "sanity");
1344 assert(new_top != NULL, "sanity");
1345 assert(top >= new_top, "summary data problem?");
1346 assert(new_top > bottom, "space is empty; should not be here");
1347 assert(new_top >= cp->destination(), "sanity");
1348 assert(top >= sd.chunk_to_addr(cp), "sanity");
1349
1350 HeapWord* const destination = cp->destination();
1351 const size_t dense_prefix_live = pointer_delta(destination, bottom);
1352 const size_t compacted_region_live = pointer_delta(new_top, destination);
1353 const size_t compacted_region_used = pointer_delta(top, sd.chunk_to_addr(cp));
1354 const size_t reclaimable = compacted_region_used - compacted_region_live;
1355
1356 const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1357 return double(reclaimable) / divisor;
1358 }
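
// A worked example for reclaimed_ratio() above (illustrative numbers only):
// with 1000 live words in the dense prefix and a compacted region holding 800
// used words of which 400 are live, reclaimable == 400 and the divisor is
// 1000 + 1.25 * 400 == 1500, giving a ratio of about 0.27. The 1.25 weighting
// on compacted_region_live presumably penalizes candidates that would require
// copying more live data.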
1359
1360 // Return the address of the end of the dense prefix, a.k.a. the start of the
1361 // compacted region. The address is always on a chunk boundary.
1362 //
1363 // Completely full chunks at the left are skipped, since no compaction can occur
1364 // in those chunks. Then the maximum amount of dead wood to allow is computed,
1365 // based on the density (amount live / capacity) of the generation; the chunk
1366 // with approximately that amount of dead space to the left is identified as the
1367 // limit chunk. Chunks between the last completely full chunk and the limit
1368 // chunk are scanned and the one that has the best (maximum) reclaimed_ratio()
1369 // is selected.
1370 HeapWord*
1371 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1372 bool maximum_compaction)
1373 {
1374 const size_t chunk_size = ParallelCompactData::ChunkSize;
1375 const ParallelCompactData& sd = summary_data();
1376
1377 const MutableSpace* const space = _space_info[id].space();
1378 HeapWord* const top = space->top();
1379 HeapWord* const top_aligned_up = sd.chunk_align_up(top);
1380 HeapWord* const new_top = _space_info[id].new_top();
1381 HeapWord* const new_top_aligned_up = sd.chunk_align_up(new_top);
1382 HeapWord* const bottom = space->bottom();
1383 const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(bottom);
1384 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
1385 const ChunkData* const new_top_cp = sd.addr_to_chunk_ptr(new_top_aligned_up);
1386
1387 // Skip full chunks at the beginning of the space--they are necessarily part
1388 // of the dense prefix.
1389 const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp);
1390 assert(full_cp->destination() == sd.chunk_to_addr(full_cp) ||
1391 space->is_empty(), "no dead space allowed to the left");
1392 assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1,
1393 "chunk must have dead space");
1394
1395 // The gc number is saved whenever a maximum compaction is done, and used to
1396 // determine when the maximum compaction interval has expired. This avoids
1397 // successive max compactions for different reasons.
1398 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1399 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1400 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1401 total_invocations() == HeapFirstMaximumCompactionCount;
1402 if (maximum_compaction || full_cp == top_cp || interval_ended) {
1403 _maximum_compaction_gc_num = total_invocations();
1404 return sd.chunk_to_addr(full_cp);
1405 }
1406
1407 const size_t space_live = pointer_delta(new_top, bottom);
1408 const size_t space_used = space->used_in_words();
1409 const size_t space_capacity = space->capacity_in_words();
1410
1411 const double density = double(space_live) / double(space_capacity);
1412 const size_t min_percent_free =
1413 id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
1414 const double limiter = dead_wood_limiter(density, min_percent_free);
1415 const size_t dead_wood_max = space_used - space_live;
1416 const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1417 dead_wood_max);
1418
1419 if (TraceParallelOldGCDensePrefix) {
1420 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1421 "space_cap=" SIZE_FORMAT,
1422 space_live, space_used,
1423 space_capacity);
1424 tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
1425 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1426 density, min_percent_free, limiter,
1427 dead_wood_max, dead_wood_limit);
1428 }
1429
1430 // Locate the chunk with the desired amount of dead space to the left.
1431 const ChunkData* const limit_cp =
1432 dead_wood_limit_chunk(full_cp, top_cp, dead_wood_limit);
1433
1434 // Scan from the first chunk with dead space to the limit chunk and find the
1435 // one with the best (largest) reclaimed ratio.
1436 double best_ratio = 0.0;
1437 const ChunkData* best_cp = full_cp;
1438 for (const ChunkData* cp = full_cp; cp < limit_cp; ++cp) {
1439 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1440 if (tmp_ratio > best_ratio) {
1441 best_cp = cp;
1442 best_ratio = tmp_ratio;
1443 }
1444 }
1445
1446 #if 0
1447 // Something to consider: if the chunk with the best ratio is 'close to' the
1448 // first chunk w/free space, choose the first chunk with free space
1449 // ("first-free"). The first-free chunk is usually near the start of the
1450 // heap, which means we are copying most of the heap already, so copy a bit
1451 // more to get complete compaction.
1452 if (pointer_delta(best_cp, full_cp, sizeof(ChunkData)) < 4) {
1453 _maximum_compaction_gc_num = total_invocations();
1454 best_cp = full_cp;
1455 }
1456 #endif // #if 0
1457
1458 return sd.chunk_to_addr(best_cp);
1459 }
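// Editor's note: an illustrative, simplified sketch of the selection loop in
// compute_dense_prefix() above, added during editing and kept disabled.  It
// uses a plain array of per-chunk ratios in place of the ChunkData table and
// reclaimed_ratio(); all names are hypothetical.
#if 0
static size_t choose_dense_prefix_chunk(const double* ratio,   // per-chunk reclaimed ratio
                                        size_t full_chunks,    // completely full chunks at the left
                                        size_t limit_chunk)    // first chunk past the dead-wood limit
{
  // Scan [full_chunks, limit_chunk) and keep the chunk with the best ratio,
  // mirroring the loop over [full_cp, limit_cp) above.
  size_t best_chunk = full_chunks;
  double best_ratio = 0.0;
  for (size_t cur = full_chunks; cur < limit_chunk; ++cur) {
    if (ratio[cur] > best_ratio) {
      best_chunk = cur;
      best_ratio = ratio[cur];
    }
  }
  return best_chunk;  // the dense prefix ends at the start of this chunk
}
#endif // editor's sketch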
1460
1461 void PSParallelCompact::summarize_spaces_quick()
1462 {
1463 for (unsigned int i = 0; i < last_space_id; ++i) {
1464 const MutableSpace* space = _space_info[i].space();
1465 bool result = _summary_data.summarize(space->bottom(), space->end(),
1466 space->bottom(), space->top(),
1467 _space_info[i].new_top_addr());
1468 assert(result, "should never fail");
1469 _space_info[i].set_dense_prefix(space->bottom());
1470 }
1471 }
1472
1473 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1474 {
1475 HeapWord* const dense_prefix_end = dense_prefix(id);
1476 const ChunkData* chunk = _summary_data.addr_to_chunk_ptr(dense_prefix_end);
1477 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1478 if (dead_space_crosses_boundary(chunk, dense_prefix_bit)) {
1479 // Only enough dead space is filled so that any remaining dead space to the
1480 // left is larger than the minimum filler object. (The remainder is filled
1481 // during the copy/update phase.)
1482 //
1483 // The size of the dead space to the right of the boundary is not a
1484 // concern, since compaction will be able to use whatever space is
1485 // available.
1486 //
1487 // Here '||' is the boundary, 'x' represents a don't care bit and a box
1488 // surrounds the space to be filled with an object.
1489 //
1490 // In the 32-bit VM, each bit represents two 32-bit words:
1491 // +---+
1492 // a) beg_bits: ... x x x | 0 | || 0 x x ...
1493 // end_bits: ... x x x | 0 | || 0 x x ...
1494 // +---+
1495 //
1496 // In the 64-bit VM, each bit represents one 64-bit word:
1497 // +------------+
1498 // b) beg_bits: ... x x x | 0 || 0 | x x ...
1499 // end_bits: ... x x 1 | 0 || 0 | x x ...
1500 // +------------+
1501 // +-------+
1502 // c) beg_bits: ... x x | 0 0 | || 0 x x ...
1503 // end_bits: ... x 1 | 0 0 | || 0 x x ...
1504 // +-------+
1505 // +-----------+
1506 // d) beg_bits: ... x | 0 0 0 | || 0 x x ...
1507 // end_bits: ... 1 | 0 0 0 | || 0 x x ...
1508 // +-----------+
1509 // +-------+
1510 // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ...
1511 // end_bits: ... 0 0 | 0 0 | || 0 x x ...
1512 // +-------+
1513
1514 // Initially assume case a, c or e will apply.
1515 size_t obj_len = (size_t)oopDesc::header_size();
1516 HeapWord* obj_beg = dense_prefix_end - obj_len;
1517
1518 #ifdef _LP64
1519 if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1520 // Case b above.
1521 obj_beg = dense_prefix_end - 1;
1522 } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1523 _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1524 // Case d above.
1525 obj_beg = dense_prefix_end - 3;
1526 obj_len = 3;
1527 }
1528 #endif // #ifdef _LP64
1529
1530 MemRegion region(obj_beg, obj_len);
1531 SharedHeap::fill_region_with_object(region);
1532 _mark_bitmap.mark_obj(obj_beg, obj_len);
1533 _summary_data.add_obj(obj_beg, obj_len);
1534 assert(start_array(id) != NULL, "sanity");
1535 start_array(id)->allocate_block(obj_beg);
1536 }
1537 }
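// Editor's note: a condensed restatement of the 64-bit case analysis in
// fill_dense_prefix_end() above, added during editing and kept disabled.  The
// bitmap query is abstracted behind a simple predicate and the object header
// is assumed to be 2 heap words; all names are hypothetical.
#if 0
static void choose_filler(bool (*is_obj_end)(long bit),  // stand-in for _mark_bitmap.is_obj_end()
                          long boundary_bit,             // bit for the dense prefix end
                          long* beg_offset,              // out: filler start, relative to the boundary word
                          long* len)                     // out: filler length in words
{
  // Cases a, c and e: a minimal 2-word filler ending exactly at the boundary.
  *beg_offset = -2;
  *len = 2;
  if (is_obj_end(boundary_bit - 2)) {
    // Case b: only one dead word remains below the boundary, so the 2-word
    // filler starts there and straddles the boundary.
    *beg_offset = -1;
  } else if (!is_obj_end(boundary_bit - 3) && is_obj_end(boundary_bit - 4)) {
    // Case d: three dead words below the boundary; cover all three so no
    // fragment smaller than the minimum filler object is left behind.
    *beg_offset = -3;
    *len = 3;
  }
}
#endif // editor's sketch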
1538
1539 void
1540 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1541 {
1542 assert(id < last_space_id, "id out of range");
1543
1544 const MutableSpace* space = _space_info[id].space();
1545 HeapWord** new_top_addr = _space_info[id].new_top_addr();
1546
1547 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1548 _space_info[id].set_dense_prefix(dense_prefix_end);
1549
1550 #ifndef PRODUCT
1551 if (TraceParallelOldGCDensePrefix) {
1552 print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end);
1553 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1554 print_dense_prefix_stats("density", id, maximum_compaction, addr);
1555 }
1556 #endif // #ifndef PRODUCT
1557
1558 // If dead space crosses the dense prefix boundary, it is (at least partially)
1559 // filled with a dummy object, marked live and added to the summary data.
1560 // This simplifies the copy/update phase and must be done before the final
1561 // locations of objects are determined, to prevent leaving a fragment of dead
1562 // space that is too small to fill with an object.
1563 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1564 fill_dense_prefix_end(id);
1565 }
1566
1567 // Compute the destination of each Chunk, and thus each object.
1568 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1569 _summary_data.summarize(dense_prefix_end, space->end(),
1570 dense_prefix_end, space->top(),
1571 new_top_addr);
1572
1573 if (TraceParallelOldGCSummaryPhase) {
1574 const size_t chunk_size = ParallelCompactData::ChunkSize;
1575 const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end);
1576 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1577 const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr);
1578 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1579 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1580 "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1581 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1582 id, space->capacity_in_words(), dense_prefix_end,
1583 dp_chunk, dp_words / chunk_size,
1584 cr_words / chunk_size, *new_top_addr);
1585 }
1586 }
1587
1588 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1589 bool maximum_compaction)
1590 {
1591 EventMark m("2 summarize");
1592 TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
1593 // trace("2");
1594
1595 #ifdef ASSERT
1596 if (VerifyParallelOldWithMarkSweep &&
1597 (PSParallelCompact::total_invocations() %
1598 VerifyParallelOldWithMarkSweepInterval) == 0) {
1599 verify_mark_bitmap(_mark_bitmap);
1600 }
1601 if (TraceParallelOldGCMarkingPhase) {
1602 tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1603 "add_obj_bytes=" SIZE_FORMAT,
1604 add_obj_count, add_obj_size * HeapWordSize);
1605 tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1606 "mark_bitmap_bytes=" SIZE_FORMAT,
1607 mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1608 }
1609 #endif // #ifdef ASSERT
1610
1611 // Quick summarization of each space into itself, to see how much is live.
1612 summarize_spaces_quick();
1613
1614 if (TraceParallelOldGCSummaryPhase) {
1615 tty->print_cr("summary_phase: after summarizing each space to self");
1616 Universe::print();
1617 NOT_PRODUCT(print_chunk_ranges());
1618 if (Verbose) {
1619 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1620 }
1621 }
1622
1623 // The amount of live data that will end up in old space (assuming it fits).
1624 size_t old_space_total_live = 0;
1625 unsigned int id;
1626 for (id = old_space_id; id < last_space_id; ++id) {
1627 old_space_total_live += pointer_delta(_space_info[id].new_top(),
1628 _space_info[id].space()->bottom());
1629 }
1630
1631 const MutableSpace* old_space = _space_info[old_space_id].space();
1632 if (old_space_total_live > old_space->capacity_in_words()) {
1633 // XXX - should also try to expand
1634 maximum_compaction = true;
1635 } else if (!UseParallelOldGCDensePrefix) {
1636 maximum_compaction = true;
1637 }
1638
1639 // Permanent and Old generations.
1640 summarize_space(perm_space_id, maximum_compaction);
1641 summarize_space(old_space_id, maximum_compaction);
1642
1643 // Summarize the remaining spaces (those in the young gen) into old space. If
1644 // the live data from a space doesn't fit, the existing summarization is left
1645 // intact, so the data is compacted down within the space itself.
1646 HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
1647 HeapWord* const target_space_end = old_space->end();
1648 for (id = eden_space_id; id < last_space_id; ++id) {
1649 const MutableSpace* space = _space_info[id].space();
1650 const size_t live = pointer_delta(_space_info[id].new_top(),
1651 space->bottom());
1652 const size_t available = pointer_delta(target_space_end, *new_top_addr);
1653 if (live <= available) {
1654 // All the live data will fit.
1655 if (TraceParallelOldGCSummaryPhase) {
1656 tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
1657 id, *new_top_addr);
1658 }
1659 _summary_data.summarize(*new_top_addr, target_space_end,
1660 space->bottom(), space->top(),
1661 new_top_addr);
1662
1663 // Reset the new_top value for the space.
1664 _space_info[id].set_new_top(space->bottom());
1665
1666 // Clear the source_chunk field for each chunk in the space.
1667 ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom());
1668 ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1);
1669 while (beg_chunk <= end_chunk) {
1670 beg_chunk->set_source_chunk(0);
1671 ++beg_chunk;
1672 }
1673 }
1674 }
1675
1676 // Fill in the block data after any changes to the chunks have
1677 // been made.
1678 #ifdef ASSERT
1679 summarize_blocks(cm, perm_space_id);
1680 summarize_blocks(cm, old_space_id);
1681 #else
1682 if (!UseParallelOldGCChunkPointerCalc) {
1683 summarize_blocks(cm, perm_space_id);
1684 summarize_blocks(cm, old_space_id);
1685 }
1686 #endif
1687
1688 if (TraceParallelOldGCSummaryPhase) {
1689 tty->print_cr("summary_phase: after final summarization");
1690 Universe::print();
1691 NOT_PRODUCT(print_chunk_ranges());
1692 if (Verbose) {
1693 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
1694 }
1695 }
1696 }
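// Editor's note: an illustrative sketch of the fit test summary_phase() uses
// above when summarizing young-gen spaces into the old gen, added during
// editing and kept disabled.  Plain size_t arithmetic stands in for the
// summary data; all names are hypothetical.
#if 0
static bool try_absorb_space(size_t live_words,      // live data in the source space
                             size_t old_space_end,   // end of the old gen (as a word index)
                             size_t* old_new_top)    // in/out: next free word in the old gen
{
  const size_t available = old_space_end - *old_new_top;
  if (live_words > available) {
    return false;               // doesn't fit: leave the space to compact within itself
  }
  *old_new_top += live_words;   // in the real code, summarize() advances new_top
  return true;
}
#endif // editor's sketch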
1697
1698 // Fill in the BlockData.
1699 // Iterate over the spaces and within each space iterate over
1700 // the chunks and fill in the BlockData for each chunk.
1701
1702 void PSParallelCompact::summarize_blocks(ParCompactionManager* cm,
1703 SpaceId first_compaction_space_id) {
1704 #if 0
1705 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);)
1706 for (SpaceId cur_space_id = first_compaction_space_id;
1707 cur_space_id != last_space_id;
1708 cur_space_id = next_compaction_space_id(cur_space_id)) {
1709 // Iterate over the chunks in the space
1710 size_t start_chunk_index =
1711 _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom());
1712 BitBlockUpdateClosure bbu(mark_bitmap(),
1713 cm,
1714 start_chunk_index);
1715 // Iterate over blocks.
1716 for (size_t chunk_index = start_chunk_index;
1717 chunk_index < _summary_data.chunk_count() &&
1718 _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top();
1719 chunk_index++) {
1720
1721 // Reset the closure for the new chunk. Note that the closure
1722 // maintains some data that does not get reset for each chunk
1723 // so a new instance of the closure is not appropriate.
1724 bbu.reset_chunk(chunk_index);
1725
1726 // Start the iteration with the first live object. This
1727 // may return the end of the chunk. That is acceptable since
1728 // it will properly limit the iterations.
1729 ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit(
1730 _summary_data.first_live_or_end_in_chunk(chunk_index));
1731
1732 // End the iteration at the end of the chunk.
1733 HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index);
1734 HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize;
1735 ParMarkBitMap::idx_t right_offset =
1736 mark_bitmap()->addr_to_bit(chunk_end);
1737
1738 // Blocks that have no objects starting in them can be
1739 // skipped because their data will never be used.
1740 if (left_offset < right_offset) {
1741
1742 // Iterate through the objects in the chunk.
1743 ParMarkBitMap::idx_t last_offset =
1744 mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset);
1745
1746 // If last_offset is less than right_offset, then the iterations
1747 // terminated while it was looking for an end bit. "last_offset"
1748 // is then the offset for the last start bit. In this situation
1749 // the "offset" field for the next block to the right (_cur_block + 1)
1750 // will not have been updated although there may be live data
1751 // to the left of the chunk.
1752
1753 size_t cur_block_plus_1 = bbu.cur_block() + 1;
1754 HeapWord* cur_block_plus_1_addr =
1755 _summary_data.block_to_addr(bbu.cur_block()) +
1756 ParallelCompactData::BlockSize;
1757 HeapWord* last_offset_addr = mark_bitmap()->bit_to_addr(last_offset);
1758 #if 1 // This code works; the #else branch should also work, but doesn't. Why is unclear.
1759 // The current block (cur_block()) has already been updated.
1760 // The last block that may need to be updated is either the
1761 // next block (current block + 1) or the block where the
1762 // last object starts (which can be greater than the
1763 // next block if there were no objects found in intervening
1764 // blocks).
1765 size_t last_block =
1766 MAX2(bbu.cur_block() + 1,
1767 _summary_data.addr_to_block_idx(last_offset_addr));
1768 #else
1769 // The current block has already been updated. The only block
1770 // that remains to be updated is the block where the last
1771 // object in the chunk starts.
1772 size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr);
1773 #endif
1774 assert_bit_is_start(last_offset);
1775 assert((last_block == _summary_data.block_count()) ||
1776 (_summary_data.block(last_block)->raw_offset() == 0),
1777 "Should not have been set");
1778 // Is the last block still in the current chunk? If still
1779 // in this chunk, update the last block (the counting that
1780 // included the current block is meant for the offset of the last
1781 // block). If not in this chunk, do nothing. Should not
1782 // update a block in the next chunk.
1783 if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(),
1784 last_block)) {
1785 if (last_offset < right_offset) {
1786 // The last object started in this chunk but ends beyond
1787 // this chunk. Update the block for this last object.
1788 assert(mark_bitmap()->is_marked(last_offset), "Should be marked");
1789 // No end bit was found. The closure takes care of
1790 // the cases where
1791 // an object crosses over into the next block
1792 // an object starts and ends in the next block
1793 // It does not handle the case where an object is
1794 // the first object in a later block and extends
1795 // past the end of the chunk (i.e., the closure
1796 // only handles complete objects that are in the range
1797 // it is given). That object is handed back here
1798 // for any special consideration necessary.
1799 //
1800 // Is the first bit in the last block a start or end bit?
1801 //
1802 // If the partial object ends in the last block L,
1803 // then the 1st bit in L may be an end bit.
1804 //
1805 // Else does the last object start in a block after the current
1806 // block? A block AA will already have been updated if an
1807 // object ends in the next block AA+1. An object found to end in
1808 // AA+1 is the trigger that updates AA. Objects are being
1809 // counted in the current block for updating a following
1810 // block. An object may start in a later
1811 // block but may extend beyond the last block in the chunk.
1812 // Updates are only done when the end of an object has been
1813 // found. If the last object (covered by block L) starts
1814 // beyond the current block, then no object ends in L (otherwise
1815 // L would be the current block). So the first bit in L is
1816 // a start bit.
1817 //
1818 // Else the last object starts in the current block and ends
1819 // beyond the chunk. The current block has already been
1820 // updated and there is no later block (with an object
1821 // starting in it) that needs to be updated.
1822 //
1823 if (_summary_data.partial_obj_ends_in_block(last_block)) {
1824 _summary_data.block(last_block)->set_end_bit_offset(
1825 bbu.live_data_left());
1826 } else if (last_offset_addr >= cur_block_plus_1_addr) {
1827 // The start of the object is in a later block
1828 // (to the right of the current block) and there are no
1829 // complete live objects to the left of this last object
1830 // within the chunk.
1831 // The first bit in the block is for the start of the
1832 // last object.
1833 _summary_data.block(last_block)->set_start_bit_offset(
1834 bbu.live_data_left());
1835 } else {
1836 // The start of the last object was found in
1837 // the current block (which has already
1838 // been updated).
1839 assert(bbu.cur_block() ==
1840 _summary_data.addr_to_block_idx(last_offset_addr),
1841 "Should be a block already processed");
1842 }
1843 #ifdef ASSERT
1844 // Is there enough block information to find this object?
1845 // The destination of the chunk has not been set so the
1846 // values returned by calc_new_pointer() and
1847 // block_calc_new_pointer() will only be
1848 // offsets. But they should agree.
1849 HeapWord* moved_obj_with_chunks =
1850 _summary_data.chunk_calc_new_pointer(last_offset_addr);
1851 HeapWord* moved_obj_with_blocks =
1852 _summary_data.calc_new_pointer(last_offset_addr);
1853 assert(moved_obj_with_chunks == moved_obj_with_blocks,
1854 "Block calculation is wrong");
1855 #endif
1856 } else if (last_block < _summary_data.block_count()) {
1857 // Iterations ended looking for a start bit (but
1858 // did not run off the end of the block table).
1859 _summary_data.block(last_block)->set_start_bit_offset(
1860 bbu.live_data_left());
1861 }
1862 }
1863 #ifdef ASSERT
1864 // Is there enough block information to find this object?
1865 HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset);
1866 HeapWord* moved_obj_with_chunks =
1867 _summary_data.chunk_calc_new_pointer(left_offset_addr);
1868 HeapWord* moved_obj_with_blocks =
1869 _summary_data.calc_new_pointer(left_offset_addr);
1870 assert(moved_obj_with_chunks == moved_obj_with_blocks,
1871 "Block calculation is wrong");
1872 #endif
1873
1874 // Is there another block after the end of this chunk?
1875 #ifdef ASSERT
1876 if (last_block < _summary_data.block_count()) {
1877 // No object may have been found in a block. If that
1878 // block is at the end of the chunk, the iteration will
1879 // terminate without incrementing the current block so
1880 // that the current block is not the last block in the
1881 // chunk. That situation precludes asserting that the
1882 // current block is the last block in the chunk. Assert
1883 // the lesser condition that the current block does not
1884 // exceed the chunk.
1885 assert(_summary_data.block_to_addr(last_block) <=
1886 (_summary_data.chunk_to_addr(chunk_index) +
1887 ParallelCompactData::ChunkSize),
1888 "Chunk and block inconsistency");
1889 assert(last_offset <= right_offset, "Iteration overran end");
1890 }
1891 #endif
1892 }
1893 #ifdef ASSERT
1894 if (PrintGCDetails && Verbose) {
1895 if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) {
1896 size_t first_block =
1897 chunk_index / ParallelCompactData::BlocksPerChunk;
1898 gclog_or_tty->print_cr("first_block " PTR_FORMAT
1899 " _offset " PTR_FORMAT
1900 "_first_is_start_bit %d",
1901 first_block,
1902 _summary_data.block(first_block)->raw_offset(),
1903 _summary_data.block(first_block)->first_is_start_bit());
1904 }
1905 }
1906 #endif
1907 }
1908 }
1909 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(16);)
1910 #endif // #if 0
1911 }
1912
1913 // This method should contain all heap-specific policy for invoking a full
1914 // collection. invoke_no_policy() will only attempt to compact the heap; it
1915 // will do nothing further. If we need to bail out for policy reasons, scavenge
1916 // before full gc, or any other specialized behavior, it needs to be added here.
1917 //
1918 // Note that this method should only be called from the vm_thread while at a
1919 // safepoint.
1920 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1921 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1922 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1923 "should be in vm thread");
1924 ParallelScavengeHeap* heap = gc_heap();
1925 GCCause::Cause gc_cause = heap->gc_cause();
1926 assert(!heap->is_gc_active(), "not reentrant");
1927
1928 PSAdaptiveSizePolicy* policy = heap->size_policy();
1929
1930 // Before each allocation/collection attempt, find out from the
1931 // policy object if GCs are, on the whole, taking too long. If so,
1932 // bail out without attempting a collection. The exceptions are
1933 // for explicitly requested GC's.
1934 if (!policy->gc_time_limit_exceeded() ||
1935 GCCause::is_user_requested_gc(gc_cause) ||
1936 GCCause::is_serviceability_requested_gc(gc_cause)) {
1937 IsGCActiveMark mark;
1938
1939 if (ScavengeBeforeFullGC) {
1940 PSScavenge::invoke_no_policy();
1941 }
1942
1943 PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
1944 }
1945 }
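// Editor's note: an illustrative restatement of the bail-out predicate used by
// invoke() above, added during editing and kept disabled; names are
// hypothetical.
#if 0
static bool should_attempt_full_gc(bool gc_time_limit_exceeded,
                                   bool user_requested_gc,
                                   bool serviceability_requested_gc) {
  // Proceed unless the policy says GCs are taking too long, and even then
  // honor explicitly requested GCs.
  return !gc_time_limit_exceeded || user_requested_gc || serviceability_requested_gc;
}
#endif // editor's sketch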
1946
1947 bool ParallelCompactData::chunk_contains(size_t chunk_index, HeapWord* addr) {
1948 size_t addr_chunk_index = addr_to_chunk_idx(addr);
1949 return chunk_index == addr_chunk_index;
1950 }
1951
1952 bool ParallelCompactData::chunk_contains_block(size_t chunk_index,
1953 size_t block_index) {
1954 size_t first_block_in_chunk = chunk_index * BlocksPerChunk;
1955 size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1;
1956
1957 return (first_block_in_chunk <= block_index) &&
1958 (block_index <= last_block_in_chunk);
1959 }
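// Editor's note: a worked example of the chunk/block index arithmetic above,
// added during editing and kept disabled.  With ChunkSize = 512 words and
// BlockSize = 128 words, BlocksPerChunk is 4, so chunk N covers blocks
// [4*N, 4*N + 3]; names are hypothetical.
#if 0
static bool chunk_contains_block_example() {
  const size_t blocks_per_chunk = 512 / 128;                           // == BlocksPerChunk
  const size_t chunk_index = 3;
  const size_t first_block = chunk_index * blocks_per_chunk;           // 12
  const size_t last_block  = (chunk_index + 1) * blocks_per_chunk - 1; // 15
  // chunk_contains_block(3, 12..15) is true; chunk_contains_block(3, 16) is false.
  return first_block == 12 && last_block == 15;
}
#endif // editor's sketch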
1960
1961 // This method contains no policy. You should probably
1962 // be calling invoke() instead.
1963 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1964 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1965 assert(ref_processor() != NULL, "Sanity");
1966
1967 if (GC_locker::is_active()) {
1968 return;
1969 }
1970
1971 TimeStamp marking_start;
1972 TimeStamp compaction_start;
1973 TimeStamp collection_exit;
1974
1975 // "serial_CM" is needed until the parallel implementation
1976 // of the move and update is done.
1977 ParCompactionManager* serial_CM = new ParCompactionManager();
1978 // Don't initialize more than once.
1979 // serial_CM->initialize(&summary_data(), mark_bitmap());
1980
1981 ParallelScavengeHeap* heap = gc_heap();
1982 GCCause::Cause gc_cause = heap->gc_cause();
1983 PSYoungGen* young_gen = heap->young_gen();
1984 PSOldGen* old_gen = heap->old_gen();
1985 PSPermGen* perm_gen = heap->perm_gen();
1986 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1987
1988 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
1989
1990 // Make sure data structures are sane, make the heap parsable, and do other
1991 // miscellaneous bookkeeping.
1992 PreGCValues pre_gc_values;
1993 pre_compact(&pre_gc_values);
1994
1995 // Place after pre_compact() where the number of invocations is incremented.
1996 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
1997
1998 {
1999 ResourceMark rm;
2000 HandleMark hm;
2001
2002 const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
2003
2004 // This is useful for debugging, but don't change the output
2005 // the customer sees.
2006 const char* gc_cause_str = "Full GC";
2007 if (is_system_gc && PrintGCDetails) {
2008 gc_cause_str = "Full GC (System)";
2009 }
2010 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
2011 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
2012 TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
2013 TraceCollectorStats tcs(counters());
2014 TraceMemoryManagerStats tms(true /* Full GC */);
2015
2016 if (TraceGen1Time) accumulated_time()->start();
2017
2018 // Let the size policy know we're starting
2019 size_policy->major_collection_begin();
2020
2021 // When collecting the permanent generation methodOops may be moving,
2022 // so we either have to flush all bcp data or convert it into bci.
2023 CodeCache::gc_prologue();
2024 Threads::gc_prologue();
2025
2026 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
2027 COMPILER2_PRESENT(DerivedPointerTable::clear());
2028
2029 ref_processor()->enable_discovery();
2030
2031 bool marked_for_unloading = false;
2032
2033 marking_start.update();
2034 marking_phase(serial_CM, maximum_heap_compaction);
2035
2036 #ifndef PRODUCT
2037 if (TraceParallelOldGCMarkingPhase) {
2038 gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d "
2039 "cas_by_another %d",
2040 mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
2041 mark_bitmap()->cas_by_another());
2042 }
2043 #endif // #ifndef PRODUCT
2044
2045 #ifdef ASSERT
2046 if (VerifyParallelOldWithMarkSweep &&
2047 (PSParallelCompact::total_invocations() %
2048 VerifyParallelOldWithMarkSweepInterval) == 0) {
2049 gclog_or_tty->print_cr("Verify marking with mark_sweep_phase1()");
2050 if (PrintGCDetails && Verbose) {
2051 gclog_or_tty->print_cr("mark_sweep_phase1:");
2052 }
2053 // Clear the discovered lists so that discovered objects
2054 // don't look like they have been discovered twice.
2055 ref_processor()->clear_discovered_references();
2056
2057 PSMarkSweep::allocate_stacks();
2058 MemRegion mr = Universe::heap()->reserved_region();
2059 PSMarkSweep::ref_processor()->enable_discovery();
2060 PSMarkSweep::mark_sweep_phase1(maximum_heap_compaction);
2061 }
2062 #endif
2063
2064 bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
2065 summary_phase(serial_CM, maximum_heap_compaction || max_on_system_gc);
2066
2067 #ifdef ASSERT
2068 if (VerifyParallelOldWithMarkSweep &&
2069 (PSParallelCompact::total_invocations() %
2070 VerifyParallelOldWithMarkSweepInterval) == 0) {
2071 if (PrintGCDetails && Verbose) {
2072 gclog_or_tty->print_cr("mark_sweep_phase2:");
2073 }
2074 PSMarkSweep::mark_sweep_phase2();
2075 }
2076 #endif
2077
2078 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
2079 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
2080
2081 // adjust_roots() updates Universe::_intArrayKlassObj which is
2082 // needed by the compaction for filling holes in the dense prefix.
2083 adjust_roots();
2084
2085 #ifdef ASSERT
2086 if (VerifyParallelOldWithMarkSweep &&
2087 (PSParallelCompact::total_invocations() %
2088 VerifyParallelOldWithMarkSweepInterval) == 0) {
2089 // Do a separate verify phase so that the verify
2090 // code can use the forwarding pointers to
2091 // check the new pointer calculation. The restore_marks()
2092 // has to be done before the real compact.
2093 serial_CM->set_action(ParCompactionManager::VerifyUpdate);
2094 compact_perm(serial_CM);
2095 compact_serial(serial_CM);
2096 serial_CM->set_action(ParCompactionManager::ResetObjects);
2097 compact_perm(serial_CM);
2098 compact_serial(serial_CM);
2099 serial_CM->set_action(ParCompactionManager::UpdateAndCopy);
2100
2101 // For debugging only
2102 PSMarkSweep::restore_marks();
2103 PSMarkSweep::deallocate_stacks();
2104 }
2105 #endif
2106
2107 compaction_start.update();
2108 // Does the perm gen always have to be done serially because
2109 // klasses are used in the update of an object?
2110 compact_perm(serial_CM);
2111
2112 if (UseParallelOldGCCompacting) {
2113 compact();
2114 } else {
2115 compact_serial(serial_CM);
2116 }
2117
2118 delete serial_CM;
2119
2120 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
2121 // done before resizing.
2122 post_compact();
2123
2124 // Let the size policy know we're done
2125 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
2126
2127 if (UseAdaptiveSizePolicy) {
2128 if (PrintAdaptiveSizePolicy) {
2129 gclog_or_tty->print("AdaptiveSizeStart: ");
2130 gclog_or_tty->stamp();
2131 gclog_or_tty->print_cr(" collection: %d ",
2132 heap->total_collections());
2133 if (Verbose) {
2134 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
2135 " perm_gen_capacity: %d ",
2136 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
2137 perm_gen->capacity_in_bytes());
2138 }
2139 }
2140
2141 // Don't check if the size_policy is ready here. Let
2142 // the size_policy check that internally.
2143 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
2144 ((gc_cause != GCCause::_java_lang_system_gc) ||
2145 UseAdaptiveSizePolicyWithSystemGC)) {
2146 // Calculate optimal free space amounts
2147 assert(young_gen->max_size() >
2148 young_gen->from_space()->capacity_in_bytes() +
2149 young_gen->to_space()->capacity_in_bytes(),
2150 "Sizes of space in young gen are out-of-bounds");
2151 size_t max_eden_size = young_gen->max_size() -
2152 young_gen->from_space()->capacity_in_bytes() -
2153 young_gen->to_space()->capacity_in_bytes();
2154 size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
2155 young_gen->eden_space()->used_in_bytes(),
2156 old_gen->used_in_bytes(),
2157 perm_gen->used_in_bytes(),
2158 young_gen->eden_space()->capacity_in_bytes(),
2159 old_gen->max_gen_size(),
2160 max_eden_size,
2161 true /* full gc*/,
2162 gc_cause);
2163
2164 heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
2165
2166 // Don't resize the young generation at a major collection. A
2167 // desired young generation size may have been calculated but
2168 // resizing the young generation complicates the code because the
2169 // resizing of the old generation may have moved the boundary
2170 // between the young generation and the old generation. Let the
2171 // young generation resizing happen at the minor collections.
2172 }
2173 if (PrintAdaptiveSizePolicy) {
2174 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
2175 heap->total_collections());
2176 }
2177 }
2178
2179 if (UsePerfData) {
2180 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
2181 counters->update_counters();
2182 counters->update_old_capacity(old_gen->capacity_in_bytes());
2183 counters->update_young_capacity(young_gen->capacity_in_bytes());
2184 }
2185
2186 heap->resize_all_tlabs();
2187
2188 // We collected the perm gen, so we'll resize it here.
2189 perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
2190
2191 if (TraceGen1Time) accumulated_time()->stop();
2192
2193 if (PrintGC) {
2194 if (PrintGCDetails) {
2195 // No GC timestamp here. This is after GC so it would be confusing.
2196 young_gen->print_used_change(pre_gc_values.young_gen_used());
2197 old_gen->print_used_change(pre_gc_values.old_gen_used());
2198 heap->print_heap_change(pre_gc_values.heap_used());
2199 // Print perm gen last (print_heap_change() excludes the perm gen).
2200 perm_gen->print_used_change(pre_gc_values.perm_gen_used());
2201 } else {
2202 heap->print_heap_change(pre_gc_values.heap_used());
2203 }
2204 }
2205
2206 // Track memory usage and detect low memory
2207 MemoryService::track_memory_usage();
2208 heap->update_counters();
2209
2210 if (PrintGCDetails) {
2211 if (size_policy->print_gc_time_limit_would_be_exceeded()) {
2212 if (size_policy->gc_time_limit_exceeded()) {
2213 gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
2214 "of %d%%", GCTimeLimit);
2215 } else {
2216 gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
2217 "of %d%%", GCTimeLimit);
2218 }
2219 }
2220 size_policy->set_print_gc_time_limit_would_be_exceeded(false);
2221 }
2222 }
2223
2224 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
2225 HandleMark hm; // Discard invalid handles created during verification
2226 gclog_or_tty->print(" VerifyAfterGC:");
2227 Universe::verify(false);
2228 }
2229
2230 // Re-verify object start arrays
2231 if (VerifyObjectStartArray &&
2232 VerifyAfterGC) {
2233 old_gen->verify_object_start_array();
2234 perm_gen->verify_object_start_array();
2235 }
2236
2237 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
2238
2239 collection_exit.update();
2240
2241 if (PrintHeapAtGC) {
2242 Universe::print_heap_after_gc();
2243 }
2244 if (PrintGCTaskTimeStamps) {
2245 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
2246 INT64_FORMAT,
2247 marking_start.ticks(), compaction_start.ticks(),
2248 collection_exit.ticks());
2249 gc_task_manager()->print_task_time_stamps();
2250 }
2251 }
2252
2253 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
2254 PSYoungGen* young_gen,
2255 PSOldGen* old_gen) {
2256 MutableSpace* const eden_space = young_gen->eden_space();
2257 assert(!eden_space->is_empty(), "eden must be non-empty");
2258 assert(young_gen->virtual_space()->alignment() ==
2259 old_gen->virtual_space()->alignment(), "alignments do not match");
2260
2261 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
2262 return false;
2263 }
2264
2265 // Both generations must be completely committed.
2266 if (young_gen->virtual_space()->uncommitted_size() != 0) {
2267 return false;
2268 }
2269 if (old_gen->virtual_space()->uncommitted_size() != 0) {
2270 return false;
2271 }
2272
2273 // Figure out how much to take from eden. Include the average amount promoted
2274 // in the total; otherwise the next young gen GC will simply bail out to a
2275 // full GC.
2276 const size_t alignment = old_gen->virtual_space()->alignment();
2277 const size_t eden_used = eden_space->used_in_bytes();
2278 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
2279 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
2280 const size_t eden_capacity = eden_space->capacity_in_bytes();
2281
2282 if (absorb_size >= eden_capacity) {
2283 return false; // Must leave some space in eden.
2284 }
2285
2286 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
2287 if (new_young_size < young_gen->min_gen_size()) {
2288 return false; // Respect young gen minimum size.
2289 }
2290
2291 if (TraceAdaptiveGCBoundary && Verbose) {
2292 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
2293 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
2294 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2295 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2296 absorb_size / K,
2297 eden_capacity / K, (eden_capacity - absorb_size) / K,
2298 young_gen->from_space()->used_in_bytes() / K,
2299 young_gen->to_space()->used_in_bytes() / K,
2300 young_gen->capacity_in_bytes() / K, new_young_size / K);
2301 }
2302
2303 // Fill the unused part of the old gen.
2304 MutableSpace* const old_space = old_gen->object_space();
2305 MemRegion old_gen_unused(old_space->top(), old_space->end());
2306 if (!old_gen_unused.is_empty()) {
2307 SharedHeap::fill_region_with_object(old_gen_unused);
2308 }
2309
2310 // Take the live data from eden and set both top and end in the old gen to
2311 // eden top. (Need to set end because reset_after_change() mangles the region
2312 // from end to virtual_space->high() in debug builds).
2313 HeapWord* const new_top = eden_space->top();
2314 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2315 absorb_size);
2316 young_gen->reset_after_change();
2317 old_space->set_top(new_top);
2318 old_space->set_end(new_top);
2319 old_gen->reset_after_change();
2320
2321 // Update the object start array for the filler object and the data from eden.
2322 ObjectStartArray* const start_array = old_gen->start_array();
2323 HeapWord* const start = old_gen_unused.start();
2324 for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
2325 start_array->allocate_block(addr);
2326 }
2327
2328 // Could update the promoted average here, but it is not typically updated at
2329 // full GCs and the value to use is unclear. Something like
2330 //
2331 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2332
2333 size_policy->set_bytes_absorbed_from_eden(absorb_size);
2334 return true;
2335 }
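// Editor's note: an illustrative sketch of the sizing checks performed by
// absorb_live_data_from_eden() above before moving the young/old boundary,
// added during editing and kept disabled.  align_up_sketch() is a local
// helper, not the VM's align_size_up(); all names are hypothetical.
#if 0
static size_t align_up_sketch(size_t n, size_t alignment) {
  return (n + alignment - 1) & ~(alignment - 1);   // alignment must be a power of two
}

static bool can_absorb_eden(size_t eden_used, size_t promoted_avg,
                            size_t eden_capacity, size_t young_capacity,
                            size_t young_min_size, size_t alignment) {
  const size_t absorb_size = align_up_sketch(eden_used + promoted_avg, alignment);
  if (absorb_size >= eden_capacity) {
    return false;                                  // must leave some space in eden
  }
  const size_t new_young_size = young_capacity - absorb_size;
  return new_young_size >= young_min_size;         // respect the young gen minimum size
}
#endif // editor's sketch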
2336
2337 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2338 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2339 "shouldn't return NULL");
2340 return ParallelScavengeHeap::gc_task_manager();
2341 }
2342
2343 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2344 bool maximum_heap_compaction) {
2345 // Recursively traverse all live objects and mark them
2346 EventMark m("1 mark object");
2347 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
2348
2349 ParallelScavengeHeap* heap = gc_heap();
2350 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2351 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
2352 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
2353
2354 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2355 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2356
2357 {
2358 TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
2359
2360 GCTaskQueue* q = GCTaskQueue::create();
2361
2362 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2363 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2364 // We scan the thread roots in parallel
2365 Threads::create_thread_roots_marking_tasks(q);
2366 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2367 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2368 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2369 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2370 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2371 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
2372
2373 if (parallel_gc_threads > 1) {
2374 for (uint j = 0; j < parallel_gc_threads; j++) {
2375 q->enqueue(new StealMarkingTask(&terminator));
2376 }
2377 }
2378
2379 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
2380 q->enqueue(fin);
2381
2382 gc_task_manager()->add_list(q);
2383
2384 fin->wait_for();
2385
2386 // We have to release the barrier tasks!
2387 WaitForBarrierGCTask::destroy(fin);
2388 }
2389
2390 // Process reference objects found during marking
2391 {
2392 TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
2393 ReferencePolicy *soft_ref_policy;
2394 if (maximum_heap_compaction) {
2395 soft_ref_policy = new AlwaysClearPolicy();
2396 } else {
2397 #ifdef COMPILER2
2398 soft_ref_policy = new LRUMaxHeapPolicy();
2399 #else
2400 soft_ref_policy = new LRUCurrentHeapPolicy();
2401 #endif // COMPILER2
2402 }
2403 assert(soft_ref_policy != NULL, "No soft reference policy");
2404 if (ref_processor()->processing_is_mt()) {
2405 RefProcTaskExecutor task_executor;
2406 ref_processor()->process_discovered_references(
2407 soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
2408 &follow_stack_closure, &task_executor);
2409 } else {
2410 ref_processor()->process_discovered_references(
2411 soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
2412 &follow_stack_closure, NULL);
2413 }
2414 }
2415
2416 TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
2417 // Follow system dictionary roots and unload classes.
2418 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2419
2420 // Follow code cache roots.
2421 CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
2422 purged_class);
2423 follow_stack(cm); // Flush marking stack.
2424
2425 // Update subklass/sibling/implementor links of live klasses
2426 // revisit_klass_stack is used in follow_weak_klass_links().
2427 follow_weak_klass_links(cm);
2428
2429 // Visit symbol and interned string tables and delete unmarked oops
2430 SymbolTable::unlink(is_alive_closure());
2431 StringTable::unlink(is_alive_closure());
2432
2433 assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
2434 assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
2435 }
2436
2437 // This should be moved to the shared markSweep code!
2438 class PSAlwaysTrueClosure: public BoolObjectClosure {
2439 public:
2440 void do_object(oop p) { ShouldNotReachHere(); }
2441 bool do_object_b(oop p) { return true; }
2442 };
2443 static PSAlwaysTrueClosure always_true;
2444
2445 void PSParallelCompact::adjust_roots() {
2446 // Adjust the pointers to reflect the new locations
2447 EventMark m("3 adjust roots");
2448 TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
2449
2450 // General strong roots.
2451 Universe::oops_do(adjust_root_pointer_closure());
2452 ReferenceProcessor::oops_do(adjust_root_pointer_closure());
2453 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
2454 Threads::oops_do(adjust_root_pointer_closure());
2455 ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
2456 FlatProfiler::oops_do(adjust_root_pointer_closure());
2457 Management::oops_do(adjust_root_pointer_closure());
2458 JvmtiExport::oops_do(adjust_root_pointer_closure());
2459 // SO_AllClasses
2460 SystemDictionary::oops_do(adjust_root_pointer_closure());
2461 vmSymbols::oops_do(adjust_root_pointer_closure());
2462
2463 // Now adjust pointers in remaining weak roots. (All of which should
2464 // have been cleared if they pointed to non-surviving objects.)
2465 // Global (weak) JNI handles
2466 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
2467
2468 CodeCache::oops_do(adjust_pointer_closure());
2469 SymbolTable::oops_do(adjust_root_pointer_closure());
2470 StringTable::oops_do(adjust_root_pointer_closure());
2471 ref_processor()->weak_oops_do(adjust_root_pointer_closure());
2472 // Roots were visited so references into the young gen in roots
2473 // may have been scanned. Process them also.
2474 // Should the reference processor have a span that excludes
2475 // young gen objects?
2476 PSScavenge::reference_processor()->weak_oops_do(
2477 adjust_root_pointer_closure());
2478 }
2479
2480 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
2481 EventMark m("4 compact perm");
2482 TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
2483 // trace("4");
2484
2485 gc_heap()->perm_gen()->start_array()->reset();
2486 move_and_update(cm, perm_space_id);
2487 }
2488
2489 void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q,
2490 uint parallel_gc_threads) {
2491 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
2492
2493 const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
2494 for (unsigned int j = 0; j < task_count; j++) {
2495 q->enqueue(new DrainStacksCompactionTask());
2496 }
2497
2498 // Find all chunks that are available (can be filled immediately) and
2499 // distribute them to the thread stacks. The iteration is done in reverse
2500 // order (high to low) so the chunks will be removed in ascending order.
2501
2502 const ParallelCompactData& sd = PSParallelCompact::summary_data();
2503
2504 size_t fillable_chunks = 0; // A count for diagnostic purposes.
2505 unsigned int which = 0; // The worker thread number.
2506
2507 for (unsigned int id = to_space_id; id > perm_space_id; --id) {
2508 SpaceInfo* const space_info = _space_info + id;
2509 MutableSpace* const space = space_info->space();
2510 HeapWord* const new_top = space_info->new_top();
2511
2512 const size_t beg_chunk = sd.addr_to_chunk_idx(space_info->dense_prefix());
2513 const size_t end_chunk = sd.addr_to_chunk_idx(sd.chunk_align_up(new_top));
2514 assert(end_chunk > 0, "perm gen cannot be empty");
2515
2516 for (size_t cur = end_chunk - 1; cur >= beg_chunk; --cur) {
2517 if (sd.chunk(cur)->claim_unsafe()) {
2518 ParCompactionManager* cm = ParCompactionManager::manager_array(which);
2519 cm->save_for_processing(cur);
2520
2521 if (TraceParallelOldGCCompactionPhase && Verbose) {
2522 const size_t count_mod_8 = fillable_chunks & 7;
2523 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
2524 gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur);
2525 if (count_mod_8 == 7) gclog_or_tty->cr();
2526 }
2527
2528 NOT_PRODUCT(++fillable_chunks;)
2529
2530 // Assign chunks to threads in round-robin fashion.
2531 if (++which == task_count) {
2532 which = 0;
2533 }
2534 }
2535 }
2536 }
2537
2538 if (TraceParallelOldGCCompactionPhase) {
2539 if (Verbose && (fillable_chunks & 7) != 0) gclog_or_tty->cr();
2540 gclog_or_tty->print_cr("%u initially fillable chunks", fillable_chunks);
2541 }
2542 }
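// Editor's note: an illustrative sketch of the round-robin distribution of
// fillable chunks to worker stacks done above, added during editing and kept
// disabled.  The claim_unsafe() step is omitted and save_for_worker() stands
// in for ParCompactionManager::save_for_processing(); names are hypothetical.
#if 0
static void distribute_chunks(size_t beg_chunk, size_t end_chunk,
                              unsigned int task_count,
                              void (*save_for_worker)(unsigned int which, size_t chunk))
{
  unsigned int which = 0;
  // Walk high-to-low so each worker later removes its chunks in ascending order.
  for (size_t cur = end_chunk; cur-- > beg_chunk; ) {
    save_for_worker(which, cur);
    if (++which == task_count) {
      which = 0;                  // wrap around to the first worker
    }
  }
}
#endif // editor's sketch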
2543
2544 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2545
2546 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2547 uint parallel_gc_threads) {
2548 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
2549
2550 ParallelCompactData& sd = PSParallelCompact::summary_data();
2551
2552 // Iterate over all the spaces adding tasks for updating
2553 // chunks in the dense prefix. Assume that 1 gc thread
2554 // will work on opening the gaps and the remaining gc threads
2555 // will work on the dense prefix.
2556 SpaceId space_id = old_space_id;
2557 while (space_id != last_space_id) {
2558 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2559 const MutableSpace* const space = _space_info[space_id].space();
2560
2561 if (dense_prefix_end == space->bottom()) {
2562 // There is no dense prefix for this space.
2563 space_id = next_compaction_space_id(space_id);
2564 continue;
2565 }
2566
2567 // The dense prefix is before this chunk.
2568 size_t chunk_index_end_dense_prefix =
2569 sd.addr_to_chunk_idx(dense_prefix_end);
2570 ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix);
2571 assert(dense_prefix_end == space->end() ||
2572 dense_prefix_cp->available() ||
2573 dense_prefix_cp->claimed(),
2574 "The chunk after the dense prefix should always be ready to fill");
2575
2576 size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom());
2577
2578 // Is there dense prefix work?
2579 size_t total_dense_prefix_chunks =
2580 chunk_index_end_dense_prefix - chunk_index_start;
2581 // How many chunks of the dense prefix should be given to
2582 // each thread?
2583 if (total_dense_prefix_chunks > 0) {
2584 uint tasks_for_dense_prefix = 1;
2585 if (UseParallelDensePrefixUpdate) {
2586 if (total_dense_prefix_chunks <=
2587 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2588 // Don't over partition. This assumes that
2589 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2590 // so there are not many chunks to process.
2591 tasks_for_dense_prefix = parallel_gc_threads;
2592 } else {
2593 // Over partition
2594 tasks_for_dense_prefix = parallel_gc_threads *
2595 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2596 }
2597 }
2598 size_t chunks_per_thread = total_dense_prefix_chunks /
2599 tasks_for_dense_prefix;
2600 // Give each thread at least 1 chunk.
2601 if (chunks_per_thread == 0) {
2602 chunks_per_thread = 1;
2603 }
2604
2605 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2606 if (chunk_index_start >= chunk_index_end_dense_prefix) {
2607 break;
2608 }
2609 // chunk_index_end is not processed
2610 size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread,
2611 chunk_index_end_dense_prefix);
2612 q->enqueue(new UpdateDensePrefixTask(
2613 space_id,
2614 chunk_index_start,
2615 chunk_index_end));
2616 chunk_index_start = chunk_index_end;
2617 }
2618 }
2619 // This gets any part of the dense prefix that did not
2620 // fit evenly.
2621 if (chunk_index_start < chunk_index_end_dense_prefix) {
2622 q->enqueue(new UpdateDensePrefixTask(
2623 space_id,
2624 chunk_index_start,
2625 chunk_index_end_dense_prefix));
2626 }
2627 space_id = next_compaction_space_id(space_id);
2628 } // End tasks for dense prefix
2629 }
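// Editor's note: a worked example of the dense prefix partitioning arithmetic
// above, added during editing and kept disabled.  With 4 GC threads, an
// over-partitioning factor of 4 and 100 dense prefix chunks, 100 > 4*4, so 16
// tasks are created with 100/16 = 6 chunks each; the remaining 4 chunks go to
// the final UpdateDensePrefixTask.  Names are hypothetical.
#if 0
static size_t chunks_per_task_example() {
  const size_t total_chunks = 100;
  const unsigned int threads = 4;
  const unsigned int over_partition = 4;      // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING
  unsigned int tasks = (total_chunks <= threads * over_partition)
      ? threads
      : threads * over_partition;             // == 16 for these inputs
  size_t per_task = total_chunks / tasks;     // == 6
  if (per_task == 0) {
    per_task = 1;                             // give each task at least one chunk
  }
  return per_task;
}
#endif // editor's sketch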
2630
2631 void PSParallelCompact::enqueue_chunk_stealing_tasks(
2632 GCTaskQueue* q,
2633 ParallelTaskTerminator* terminator_ptr,
2634 uint parallel_gc_threads) {
2635 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
2636
2637 // Once a thread has drained its stack, it should try to steal chunks from
2638 // other threads.
2639 if (parallel_gc_threads > 1) {
2640 for (uint j = 0; j < parallel_gc_threads; j++) {
2641 q->enqueue(new StealChunkCompactionTask(terminator_ptr));
2642 }
2643 }
2644 }
2645
2646 void PSParallelCompact::compact() {
2647 EventMark m("5 compact");
2648 // trace("5");
2649 TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
2650
2651 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2652 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2653 PSOldGen* old_gen = heap->old_gen();
2654 old_gen->start_array()->reset();
2655 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2656 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
2657 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
2658
2659 GCTaskQueue* q = GCTaskQueue::create();
2660 enqueue_chunk_draining_tasks(q, parallel_gc_threads);
2661 enqueue_dense_prefix_tasks(q, parallel_gc_threads);
2662 enqueue_chunk_stealing_tasks(q, &terminator, parallel_gc_threads);
2663
2664 {
2665 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
2666
2667 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
2668 q->enqueue(fin);
2669
2670 gc_task_manager()->add_list(q);
2671
2672 fin->wait_for();
2673
2674 // We have to release the barrier tasks!
2675 WaitForBarrierGCTask::destroy(fin);
2676
2677 #ifdef ASSERT
2678 // Verify that all chunks have been processed before the deferred updates.
2679 // Note that perm_space_id is skipped; this type of verification is not
2680 // valid until the perm gen is compacted by chunks.
2681 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2682 verify_complete(SpaceId(id));
2683 }
2684 #endif
2685 }
2686
2687 {
2688 // Update the deferred objects, if any. Any compaction manager can be used.
2689 TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
2690 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2691 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2692 update_deferred_objects(cm, SpaceId(id));
2693 }
2694 }
2695 }
2696
2697 #ifdef ASSERT
2698 void PSParallelCompact::verify_complete(SpaceId space_id) {
2699 // All Chunks between space bottom() and new_top() should be marked as filled
2700 // and all Chunks between new_top() and top() should be available (i.e.,
2701 // should have been emptied).
2702 ParallelCompactData& sd = summary_data();
2703 SpaceInfo si = _space_info[space_id];
2704 HeapWord* new_top_addr = sd.chunk_align_up(si.new_top());
2705 HeapWord* old_top_addr = sd.chunk_align_up(si.space()->top());
2706 const size_t beg_chunk = sd.addr_to_chunk_idx(si.space()->bottom());
2707 const size_t new_top_chunk = sd.addr_to_chunk_idx(new_top_addr);
2708 const size_t old_top_chunk = sd.addr_to_chunk_idx(old_top_addr);
2709
2710 bool issued_a_warning = false;
2711
2712 size_t cur_chunk;
2713 for (cur_chunk = beg_chunk; cur_chunk < new_top_chunk; ++cur_chunk) {
2714 const ChunkData* const c = sd.chunk(cur_chunk);
2715 if (!c->completed()) {
2716 warning("chunk " SIZE_FORMAT " not filled: "
2717 "destination_count=" SIZE_FORMAT,
2718 cur_chunk, c->destination_count());
2719 issued_a_warning = true;
2720 }
2721 }
2722
2723 for (cur_chunk = new_top_chunk; cur_chunk < old_top_chunk; ++cur_chunk) {
2724 const ChunkData* const c = sd.chunk(cur_chunk);
2725 if (!c->available()) {
2726 warning("chunk " SIZE_FORMAT " not empty: "
2727 "destination_count=" SIZE_FORMAT,
2728 cur_chunk, c->destination_count());
2729 issued_a_warning = true;
2730 }
2731 }
2732
2733 if (issued_a_warning) {
2734 print_chunk_ranges();
2735 }
2736 }
2737 #endif // #ifdef ASSERT
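// Editor's note: an illustrative sketch of the invariant checked by
// verify_complete() above, expressed over a plain array of chunk states; added
// during editing and kept disabled, with hypothetical names.
#if 0
enum SketchChunkState { SKETCH_AVAILABLE, SKETCH_CLAIMED, SKETCH_COMPLETED };

static bool chunks_look_complete(const SketchChunkState* state,
                                 size_t beg_chunk, size_t new_top_chunk,
                                 size_t old_top_chunk)
{
  // Every chunk below new_top must have been filled ...
  for (size_t c = beg_chunk; c < new_top_chunk; ++c) {
    if (state[c] != SKETCH_COMPLETED) return false;
  }
  // ... and every chunk between new_top and the old top must have been emptied.
  for (size_t c = new_top_chunk; c < old_top_chunk; ++c) {
    if (state[c] != SKETCH_AVAILABLE) return false;
  }
  return true;
}
#endif // editor's sketch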
2738
2739 void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
2740 EventMark m("5 compact serial");
2741 TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
2742
2743 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2744 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2745
2746 PSYoungGen* young_gen = heap->young_gen();
2747 PSOldGen* old_gen = heap->old_gen();
2748
2749 old_gen->start_array()->reset();
2750 old_gen->move_and_update(cm);
2751 young_gen->move_and_update(cm);
2752 }
2753
2754 void PSParallelCompact::follow_root(ParCompactionManager* cm, oop* p) {
2755 assert(!Universe::heap()->is_in_reserved(p),
2756 "roots shouldn't be things within the heap");
2757 #ifdef VALIDATE_MARK_SWEEP
2758 if (ValidateMarkSweep) {
2759 guarantee(!_root_refs_stack->contains(p), "should only be in here once");
2760 _root_refs_stack->push(p);
2761 }
2762 #endif
2763 oop m = *p;
2764 if (m != NULL && mark_bitmap()->is_unmarked(m)) {
2765 if (mark_obj(m)) {
2766 m->follow_contents(cm); // Follow contents of the marked object
2767 }
2768 }
2769 follow_stack(cm);
2770 }
2771
2772 void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
2773 while(!cm->overflow_stack()->is_empty()) {
2774 oop obj = cm->overflow_stack()->pop();
2775 obj->follow_contents(cm);
2776 }
2777
2778 oop obj;
2779 // obj is a reference!!!
2780 while (cm->marking_stack()->pop_local(obj)) {
2781 // It would be nice to assert about the type of objects we might
2782 // pop, but they can come from anywhere, unfortunately.
2783 obj->follow_contents(cm);
2784 }
2785 }
2786
2787 void
2788 PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
2789 // All klasses on the revisit stack are marked at this point.
2790 // Update and follow all subklass, sibling and implementor links.
2791 for (uint i = 0; i < ParallelGCThreads+1; i++) {
2792 ParCompactionManager* cm = ParCompactionManager::manager_array(i);
2793 KeepAliveClosure keep_alive_closure(cm);
2794 for (int j = 0; j < cm->revisit_klass_stack()->length(); j++) {
2795 cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
2796 is_alive_closure(),
2797 &keep_alive_closure);
2798 }
2799 follow_stack(cm);
2800 }
2801 }
2802
2803 void
2804 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
2805 cm->revisit_klass_stack()->push(k);
2806 }
2807
2808 #ifdef VALIDATE_MARK_SWEEP
2809
2810 void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
2811 if (!ValidateMarkSweep)
2812 return;
2813
2814 if (!isroot) {
2815 if (_pointer_tracking) {
2816 guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
2817 _adjusted_pointers->remove(p);
2818 }
2819 } else {
2820 ptrdiff_t index = _root_refs_stack->find(p);
2821 if (index != -1) {
2822 int l = _root_refs_stack->length();
2823 if (l > 0 && l - 1 != index) {
2824 oop* last = _root_refs_stack->pop();
2825 assert(last != p, "should be different");
2826 _root_refs_stack->at_put(index, last);
2827 } else {
2828 _root_refs_stack->remove(p);
2829 }
2830 }
2831 }
2832 }
2833
2834
2835 void PSParallelCompact::check_adjust_pointer(oop* p) {
2836 _adjusted_pointers->push(p);
2837 }
2838
2839
2840 class AdjusterTracker: public OopClosure {
2841 public:
2842 AdjusterTracker() {};
2843 void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
2844 };
2845
2846
2847 void PSParallelCompact::track_interior_pointers(oop obj) {
2848 if (ValidateMarkSweep) {
2849 _adjusted_pointers->clear();
2850 _pointer_tracking = true;
2851
2852 AdjusterTracker checker;
2853 obj->oop_iterate(&checker);
2854 }
2855 }
2856
2857
2858 void PSParallelCompact::check_interior_pointers() {
2859 if (ValidateMarkSweep) {
2860 _pointer_tracking = false;
2861 guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
2862 }
2863 }
2864
2865
2866 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
2867 if (ValidateMarkSweep) {
2868 guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
2869 _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
2870 }
2871 }
2872
2873
2874 void PSParallelCompact::register_live_oop(oop p, size_t size) {
2875 if (ValidateMarkSweep) {
2876 _live_oops->push(p);
2877 _live_oops_size->push(size);
2878 _live_oops_index++;
2879 }
2880 }
2881
2882 void PSParallelCompact::validate_live_oop(oop p, size_t size) {
2883 if (ValidateMarkSweep) {
2884 oop obj = _live_oops->at((int)_live_oops_index);
2885 guarantee(obj == p, "should be the same object");
2886 guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
2887 _live_oops_index++;
2888 }
2889 }
2890
2891 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
2892 HeapWord* compaction_top) {
2893 assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
2894 "should be moved to forwarded location");
2895 if (ValidateMarkSweep) {
2896 PSParallelCompact::validate_live_oop(oop(q), size);
2897 _live_oops_moved_to->push(oop(compaction_top));
2898 }
2899 if (RecordMarkSweepCompaction) {
2900 _cur_gc_live_oops->push(q);
2901 _cur_gc_live_oops_moved_to->push(compaction_top);
2902 _cur_gc_live_oops_size->push(size);
2903 }
2904 }
2905
2906
2907 void PSParallelCompact::compaction_complete() {
2908 if (RecordMarkSweepCompaction) {
2909 GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
2910 GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
2911 GrowableArray<size_t> * _tmp_live_oops_size = _cur_gc_live_oops_size;
2912
2913 _cur_gc_live_oops = _last_gc_live_oops;
2914 _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to;
2915 _cur_gc_live_oops_size = _last_gc_live_oops_size;
2916 _last_gc_live_oops = _tmp_live_oops;
2917 _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to;
2918 _last_gc_live_oops_size = _tmp_live_oops_size;
2919 }
2920 }
2921
2922
2923 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
2924 if (!RecordMarkSweepCompaction) {
2925 tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
2926 return;
2927 }
2928
2929 if (_last_gc_live_oops == NULL) {
2930 tty->print_cr("No compaction information gathered yet");
2931 return;
2932 }
2933
2934 for (int i = 0; i < _last_gc_live_oops->length(); i++) {
2935 HeapWord* old_oop = _last_gc_live_oops->at(i);
2936 size_t sz = _last_gc_live_oops_size->at(i);
2937 if (old_oop <= q && q < (old_oop + sz)) {
2938 HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
2939 size_t offset = (q - old_oop);
2940 tty->print_cr("Address " PTR_FORMAT, q);
2941 tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT ", at offset " SIZE_FORMAT, old_oop, sz, offset);
2942 tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
2943 return;
2944 }
2945 }
2946
2947 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
2948 }
2949 #endif //VALIDATE_MARK_SWEEP
2950
2951 void PSParallelCompact::adjust_pointer(oop* p, bool isroot) {
2952 oop obj = *p;
2953 VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
2954 if (obj != NULL) {
2955 oop new_pointer = (oop) summary_data().calc_new_pointer(obj);
2956 assert(new_pointer != NULL || // is forwarding ptr?
2957 obj->is_shared(), // never forwarded?
2958 "should have a new location");
2959 // Just always do the update unconditionally?
2960 if (new_pointer != NULL) {
2961 *p = new_pointer;
2962 assert(Universe::heap()->is_in_reserved(new_pointer),
2963 "should be in object space");
2964 VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
2965 }
2966 }
2967 VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
2968 }
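
// A minimal sketch of the same slot update in isolation, assuming the new
// location is supplied by a caller-provided lookup function (the names below
// are illustrative only): read the slot, and store the new address back only
// when one has been computed for the object.
static void example_adjust_slot(void** slot, void* (*calc_new_location)(void* obj)) {
  void* obj = *slot;
  if (obj != NULL) {
    void* new_location = calc_new_location(obj);
    if (new_location != NULL) {
      *slot = new_location;   // point the slot at the post-compaction address
    }
  }
}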
2969
2970 // Update interior oops in the ranges of chunks [beg_chunk, end_chunk).
2971 void
2972 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2973 SpaceId space_id,
2974 size_t beg_chunk,
2975 size_t end_chunk) {
2976 ParallelCompactData& sd = summary_data();
2977 ParMarkBitMap* const mbm = mark_bitmap();
2978
2979 HeapWord* beg_addr = sd.chunk_to_addr(beg_chunk);
2980 HeapWord* const end_addr = sd.chunk_to_addr(end_chunk);
2981 assert(beg_chunk <= end_chunk, "bad chunk range");
2982 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2983
2984 #ifdef ASSERT
2985 // Claim the chunks to avoid triggering an assert when they are marked as
2986 // filled.
2987 for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) {
2988 assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed");
2989 }
2990 #endif // #ifdef ASSERT
2991
2992 if (beg_addr != space(space_id)->bottom()) {
2993 // Find the first live object or block of dead space that *starts* in this
2994 // range of chunks. If a partial object crosses onto the chunk, skip it; it
2995 // will be marked for 'deferred update' when the object head is processed.
2996 // If dead space crosses onto the chunk, it is also skipped; it will be
2997 // filled when the prior chunk is processed. If neither of those apply, the
2998 // first word in the chunk is the start of a live object or dead space.
2999 assert(beg_addr > space(space_id)->bottom(), "sanity");
3000 const ChunkData* const cp = sd.chunk(beg_chunk);
3001 if (cp->partial_obj_size() != 0) {
3002 beg_addr = sd.partial_obj_end(beg_chunk);
3003 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
3004 beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
3005 }
3006 }
3007
3008 if (beg_addr < end_addr) {
3009 // A live object or block of dead space starts in this range of chunks.
3010 HeapWord* const dense_prefix_end = dense_prefix(space_id);
3011
3012 // Create closures and iterate.
3013 UpdateOnlyClosure update_closure(mbm, cm, space_id);
3014 FillClosure fill_closure(cm, space_id);
3015 ParMarkBitMap::IterationStatus status;
3016 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
3017 dense_prefix_end);
3018 if (status == ParMarkBitMap::incomplete) {
3019 update_closure.do_addr(update_closure.source());
3020 }
3021 }
3022
3023 // Mark the chunks as filled.
3024 ChunkData* const beg_cp = sd.chunk(beg_chunk);
3025 ChunkData* const end_cp = sd.chunk(end_chunk);
3026 for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) {
3027 cp->set_completed();
3028 }
3029 }
3030
3031 // Return the SpaceId for the space containing addr. If addr is not in the
3032 // heap, last_space_id is returned. In debug mode it expects the address to be
3033 // in the heap and asserts such.
3034 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
3035 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
3036
3037 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
3038 if (_space_info[id].space()->contains(addr)) {
3039 return SpaceId(id);
3040 }
3041 }
3042
3043 assert(false, "no space contains the addr");
3044 return last_space_id;
3045 }
3046
3047 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
3048 SpaceId id) {
3049 assert(id < last_space_id, "bad space id");
3050
3051 ParallelCompactData& sd = summary_data();
3052 const SpaceInfo* const space_info = _space_info + id;
3053 ObjectStartArray* const start_array = space_info->start_array();
3054
3055 const MutableSpace* const space = space_info->space();
3056 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
3057 HeapWord* const beg_addr = space_info->dense_prefix();
3058 HeapWord* const end_addr = sd.chunk_align_up(space_info->new_top());
3059
3060 const ChunkData* const beg_chunk = sd.addr_to_chunk_ptr(beg_addr);
3061 const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr);
3062 const ChunkData* cur_chunk;
3063 for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) {
3064 HeapWord* const addr = cur_chunk->deferred_obj_addr();
3065 if (addr != NULL) {
3066 if (start_array != NULL) {
3067 start_array->allocate_block(addr);
3068 }
3069 oop(addr)->update_contents(cm);
3070 assert(oop(addr)->is_oop_or_null(), "should be an oop now");
3071 }
3072 }
3073 }
3074
3075 // Skip over count live words starting from beg, and return the address of the
3076 // next live word. Unless marked, the word corresponding to beg is assumed to
3077 // be dead. Callers must either ensure beg does not correspond to the middle of
3078 // an object, or account for those live words in some other way. Callers must
3079 // also ensure that there are enough live words in the range [beg, end) to skip.
3080 HeapWord*
3081 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
3082 {
3083 assert(count > 0, "sanity");
3084
3085 ParMarkBitMap* m = mark_bitmap();
3086 idx_t bits_to_skip = m->words_to_bits(count);
3087 idx_t cur_beg = m->addr_to_bit(beg);
3088 const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
3089
3090 do {
3091 cur_beg = m->find_obj_beg(cur_beg, search_end);
3092 idx_t cur_end = m->find_obj_end(cur_beg, search_end);
3093 const size_t obj_bits = cur_end - cur_beg + 1;
3094 if (obj_bits > bits_to_skip) {
3095 return m->bit_to_addr(cur_beg + bits_to_skip);
3096 }
3097 bits_to_skip -= obj_bits;
3098 cur_beg = cur_end + 1;
3099 } while (bits_to_skip > 0);
3100
3101 // Skipping the desired number of words landed just past the end of an object.
3102 // Find the start of the next object.
3103 cur_beg = m->find_obj_beg(cur_beg, search_end);
3104 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
3105 return m->bit_to_addr(cur_beg);
3106 }
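
// A minimal, self-contained sketch of the same skipping idea over a plain
// boolean "live word" map (illustrative only; the array stands in for the mark
// bitmap and the names below do not appear elsewhere in this file). The
// function consumes 'count' live entries starting at 'beg' and returns the
// index of the next live entry, stepping over dead entries for free.
static size_t example_skip_live(const bool* live, size_t beg, size_t end, size_t count) {
  size_t cur = beg;
  while (cur < end) {
    if (!live[cur]) { ++cur; continue; }   // dead word: costs nothing to skip
    if (count == 0) return cur;            // landed on the next live word
    --count;                               // consume one live word
    ++cur;
  }
  return end;                              // not enough live words in [beg, end)
}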
3107
3108 HeapWord*
3109 PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
3110 size_t src_chunk_idx)
3111 {
3112 ParMarkBitMap* const bitmap = mark_bitmap();
3113 const ParallelCompactData& sd = summary_data();
3114 const size_t ChunkSize = ParallelCompactData::ChunkSize;
3115
3116 assert(sd.is_chunk_aligned(dest_addr), "not aligned");
3117
3118 const ChunkData* const src_chunk_ptr = sd.chunk(src_chunk_idx);
3119 const size_t partial_obj_size = src_chunk_ptr->partial_obj_size();
3120 HeapWord* const src_chunk_destination = src_chunk_ptr->destination();
3121
3122 assert(dest_addr >= src_chunk_destination, "wrong src chunk");
3123 assert(src_chunk_ptr->data_size() > 0, "src chunk cannot be empty");
3124
3125 HeapWord* const src_chunk_beg = sd.chunk_to_addr(src_chunk_idx);
3126 HeapWord* const src_chunk_end = src_chunk_beg + ChunkSize;
3127
3128 HeapWord* addr = src_chunk_beg;
3129 if (dest_addr == src_chunk_destination) {
3130 // Return the first live word in the source chunk.
3131 if (partial_obj_size == 0) {
3132 addr = bitmap->find_obj_beg(addr, src_chunk_end);
3133 assert(addr < src_chunk_end, "no objects start in src chunk");
3134 }
3135 return addr;
3136 }
3137
3138 // Must skip some live data.
3139 size_t words_to_skip = dest_addr - src_chunk_destination;
3140 assert(src_chunk_ptr->data_size() > words_to_skip, "wrong src chunk");
3141
3142 if (partial_obj_size >= words_to_skip) {
3143 // All the live words to skip are part of the partial object.
3144 addr += words_to_skip;
3145 if (partial_obj_size == words_to_skip) {
3146 // Find the first live word past the partial object.
3147 addr = bitmap->find_obj_beg(addr, src_chunk_end);
3148 assert(addr < src_chunk_end, "wrong src chunk");
3149 }
3150 return addr;
3151 }
3152
3153 // Skip over the partial object (if any).
3154 if (partial_obj_size != 0) {
3155 words_to_skip -= partial_obj_size;
3156 addr += partial_obj_size;
3157 }
3158
3159 // Skip over live words due to objects that start in the chunk.
3160 addr = skip_live_words(addr, src_chunk_end, words_to_skip);
3161 assert(addr < src_chunk_end, "wrong src chunk");
3162 return addr;
3163 }
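
// A minimal sketch of the accounting above, using plain word counts instead of
// heap addresses (illustrative names only): split the number of live words to
// skip between the partial object entering the chunk and the objects that
// start inside the chunk, mirroring the two branches above.
static void example_split_skip(size_t words_to_skip, size_t partial_obj_size,
                               size_t* skip_in_partial, size_t* skip_in_objects) {
  if (partial_obj_size >= words_to_skip) {
    *skip_in_partial = words_to_skip;                      // all inside the partial object
    *skip_in_objects = 0;
  } else {
    *skip_in_partial = partial_obj_size;                   // step past the whole partial object
    *skip_in_objects = words_to_skip - partial_obj_size;   // remainder handled by skip_live_words()
  }
}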
3164
3165 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
3166 size_t beg_chunk,
3167 HeapWord* end_addr)
3168 {
3169 ParallelCompactData& sd = summary_data();
3170 ChunkData* const beg = sd.chunk(beg_chunk);
3171 HeapWord* const end_addr_aligned_up = sd.chunk_align_up(end_addr);
3172 ChunkData* const end = sd.addr_to_chunk_ptr(end_addr_aligned_up);
3173 size_t cur_idx = beg_chunk;
3174 for (ChunkData* cur = beg; cur < end; ++cur, ++cur_idx) {
3175 assert(cur->data_size() > 0, "chunk must have live data");
3176 cur->decrement_destination_count();
3177 if (cur_idx <= cur->source_chunk() && cur->available() && cur->claim()) {
3178 cm->save_for_processing(cur_idx);
3179 }
3180 }
3181 }
3182
3183 size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure,
3184 SpaceId& src_space_id,
3185 HeapWord*& src_space_top,
3186 HeapWord* end_addr)
3187 {
3188 typedef ParallelCompactData::ChunkData ChunkData;
3189
3190 ParallelCompactData& sd = PSParallelCompact::summary_data();
3191 const size_t chunk_size = ParallelCompactData::ChunkSize;
3192
3193 size_t src_chunk_idx = 0;
3194
3195 // Skip empty chunks (if any) up to the top of the space.
3196 HeapWord* const src_aligned_up = sd.chunk_align_up(end_addr);
3197 ChunkData* src_chunk_ptr = sd.addr_to_chunk_ptr(src_aligned_up);
3198 HeapWord* const top_aligned_up = sd.chunk_align_up(src_space_top);
3199 const ChunkData* const top_chunk_ptr = sd.addr_to_chunk_ptr(top_aligned_up);
3200 while (src_chunk_ptr < top_chunk_ptr && src_chunk_ptr->data_size() == 0) {
3201 ++src_chunk_ptr;
3202 }
3203
3204 if (src_chunk_ptr < top_chunk_ptr) {
3205 // The next source chunk is in the current space. Update src_chunk_idx and
3206 // the source address to match src_chunk_ptr.
3207 src_chunk_idx = sd.chunk(src_chunk_ptr);
3208 HeapWord* const src_chunk_addr = sd.chunk_to_addr(src_chunk_idx);
3209 if (src_chunk_addr > closure.source()) {
3210 closure.set_source(src_chunk_addr);
3211 }
3212 return src_chunk_idx;
3213 }
3214
3215 // Switch to a new source space and find the first non-empty chunk.
3216 unsigned int space_id = src_space_id + 1;
3217 assert(space_id < last_space_id, "not enough spaces");
3218
3219 HeapWord* const destination = closure.destination();
3220
3221 do {
3222 MutableSpace* space = _space_info[space_id].space();
3223 HeapWord* const bottom = space->bottom();
3224 const ChunkData* const bottom_cp = sd.addr_to_chunk_ptr(bottom);
3225
3226 // Iterate over the spaces that do not compact into themselves.
3227 if (bottom_cp->destination() != bottom) {
3228 HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
3229 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
3230
3231 for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
3232 if (src_cp->live_obj_size() > 0) {
3233 // Found it.
3234 assert(src_cp->destination() == destination,
3235 "first live obj in the space must match the destination");
3236 assert(src_cp->partial_obj_size() == 0,
3237 "a space cannot begin with a partial obj");
3238
3239 src_space_id = SpaceId(space_id);
3240 src_space_top = space->top();
3241 const size_t src_chunk_idx = sd.chunk(src_cp);
3242 closure.set_source(sd.chunk_to_addr(src_chunk_idx));
3243 return src_chunk_idx;
3244 } else {
3245 assert(src_cp->data_size() == 0, "sanity");
3246 }
3247 }
3248 }
3249 } while (++space_id < last_space_id);
3250
3251 assert(false, "no source chunk was found");
3252 return 0;
3253 }
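
// A minimal sketch of the scan order, assuming per-space arrays of live word
// counts per chunk (illustrative names only; the filter for spaces that
// compact into themselves is omitted here): look for the next chunk with live
// data in the current space, then fall through to the following spaces.
static bool example_find_next_source(const size_t* const* live_words,   // [space][chunk]
                                     const size_t* chunks_per_space,
                                     size_t space_count,
                                     size_t cur_space, size_t cur_chunk,
                                     size_t* out_space, size_t* out_chunk) {
  for (size_t s = cur_space; s < space_count; ++s) {
    size_t c = (s == cur_space) ? cur_chunk : 0;   // later spaces start at their first chunk
    for (; c < chunks_per_space[s]; ++c) {
      if (live_words[s][c] != 0) {
        *out_space = s;
        *out_chunk = c;
        return true;
      }
    }
  }
  return false;   // no chunk with live data remains
}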
3254
3255 void PSParallelCompact::fill_chunk(ParCompactionManager* cm, size_t chunk_idx)
3256 {
3257 typedef ParMarkBitMap::IterationStatus IterationStatus;
3258 const size_t ChunkSize = ParallelCompactData::ChunkSize;
3259 ParMarkBitMap* const bitmap = mark_bitmap();
3260 ParallelCompactData& sd = summary_data();
3261 ChunkData* const chunk_ptr = sd.chunk(chunk_idx);
3262
3263 // Get the items needed to construct the closure.
3264 HeapWord* dest_addr = sd.chunk_to_addr(chunk_idx);
3265 SpaceId dest_space_id = space_id(dest_addr);
3266 ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
3267 HeapWord* new_top = _space_info[dest_space_id].new_top();
3268 assert(dest_addr < new_top, "sanity");
3269 const size_t words = MIN2(pointer_delta(new_top, dest_addr), ChunkSize);
3270
3271 // Get the source chunk and related info.
3272 size_t src_chunk_idx = chunk_ptr->source_chunk();
3273 SpaceId src_space_id = space_id(sd.chunk_to_addr(src_chunk_idx));
3274 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
3275
3276 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3277 closure.set_source(first_src_addr(dest_addr, src_chunk_idx));
3278
3279 // Adjust src_chunk_idx to prepare for decrementing destination counts (the
3280 // destination count is not decremented when a chunk is copied to itself).
3281 if (src_chunk_idx == chunk_idx) {
3282 src_chunk_idx += 1;
3283 }
3284
3285 if (bitmap->is_unmarked(closure.source())) {
3286 // The first source word is in the middle of an object; copy the remainder
3287 // of the object or as much as will fit. The fact that pointer updates were
3288 // deferred will be noted when the object header is processed.
3289 HeapWord* const old_src_addr = closure.source();
3290 closure.copy_partial_obj();
3291 if (closure.is_full()) {
3292 decrement_destination_counts(cm, src_chunk_idx, closure.source());
3293 chunk_ptr->set_deferred_obj_addr(NULL);
3294 chunk_ptr->set_completed();
3295 return;
3296 }
3297
3298 HeapWord* const end_addr = sd.chunk_align_down(closure.source());
3299 if (sd.chunk_align_down(old_src_addr) != end_addr) {
3300 // The partial object was copied from more than one source chunk.
3301 decrement_destination_counts(cm, src_chunk_idx, end_addr);
3302
3303 // Move to the next source chunk, possibly switching spaces as well. All
3304 // args except end_addr may be modified.
3305 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
3306 end_addr);
3307 }
3308 }
3309
3310 do {
3311 HeapWord* const cur_addr = closure.source();
3312 HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1),
3313 src_space_top);
3314 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
3315
3316 if (status == ParMarkBitMap::incomplete) {
3317 // The last obj that starts in the source chunk does not end in the chunk.
3318 assert(closure.source() < end_addr, "sanity");
3319 HeapWord* const obj_beg = closure.source();
3320 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
3321 src_space_top);
3322 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
3323 if (obj_end < range_end) {
3324 // The end was found; the entire object will fit.
3325 status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
3326 assert(status != ParMarkBitMap::would_overflow, "sanity");
3327 } else {
3328 // The end was not found; the object will not fit.
3329 assert(range_end < src_space_top, "obj cannot cross space boundary");
3330 status = ParMarkBitMap::would_overflow;
3331 }
3332 }
3333
3334 if (status == ParMarkBitMap::would_overflow) {
3335 // The last object did not fit. Record that interior oop updates were
3336 // deferred, then copy enough of the object to fill the chunk.
3337 chunk_ptr->set_deferred_obj_addr(closure.destination());
3338 status = closure.copy_until_full(); // copies from closure.source()
3339
3340 decrement_destination_counts(cm, src_chunk_idx, closure.source());
3341 chunk_ptr->set_completed();
3342 return;
3343 }
3344
3345 if (status == ParMarkBitMap::full) {
3346 decrement_destination_counts(cm, src_chunk_idx, closure.source());
3347 chunk_ptr->set_deferred_obj_addr(NULL);
3348 chunk_ptr->set_completed();
3349 return;
3350 }
3351
3352 decrement_destination_counts(cm, src_chunk_idx, end_addr);
3353
3354 // Move to the next source chunk, possibly switching spaces as well. All
3355 // args except end_addr may be modified.
3356 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
3357 end_addr);
3358 } while (true);
3359 }
3360
3361 void
3362 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
3363 const MutableSpace* sp = space(space_id);
3364 if (sp->is_empty()) {
3365 return;
3366 }
3367
3368 ParallelCompactData& sd = PSParallelCompact::summary_data();
3369 ParMarkBitMap* const bitmap = mark_bitmap();
3370 HeapWord* const dp_addr = dense_prefix(space_id);
3371 HeapWord* beg_addr = sp->bottom();
3372 HeapWord* end_addr = sp->top();
3373
3374 #ifdef ASSERT
3375 assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
3376 if (cm->should_verify_only()) {
3377 VerifyUpdateClosure verify_update(cm, sp);
3378 bitmap->iterate(&verify_update, beg_addr, end_addr);
3379 return;
3380 }
3381
3382 if (cm->should_reset_only()) {
3383 ResetObjectsClosure reset_objects(cm);
3384 bitmap->iterate(&reset_objects, beg_addr, end_addr);
3385 return;
3386 }
3387 #endif
3388
3389 const size_t beg_chunk = sd.addr_to_chunk_idx(beg_addr);
3390 const size_t dp_chunk = sd.addr_to_chunk_idx(dp_addr);
3391 if (beg_chunk < dp_chunk) {
3392 update_and_deadwood_in_dense_prefix(cm, space_id, beg_chunk, dp_chunk);
3393 }
3394
3395 // The destination of the first live object that starts in the chunk is one
3396 // past the end of the partial object entering the chunk (if any).
3397 HeapWord* const dest_addr = sd.partial_obj_end(dp_chunk);
3398 HeapWord* const new_top = _space_info[space_id].new_top();
3399 assert(new_top >= dest_addr, "bad new_top value");
3400 const size_t words = pointer_delta(new_top, dest_addr);
3401
3402 if (words > 0) {
3403 ObjectStartArray* start_array = _space_info[space_id].start_array();
3404 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3405
3406 ParMarkBitMap::IterationStatus status;
3407 status = bitmap->iterate(&closure, dest_addr, end_addr);
3408 assert(status == ParMarkBitMap::full, "iteration not complete");
3409 assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
3410 "live objects skipped because closure is full");
3411 }
3412 }
3413
3414 jlong PSParallelCompact::millis_since_last_gc() {
3415 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
3416 // XXX See note in genCollectedHeap::millis_since_last_gc().
3417 if (ret_val < 0) {
3418 NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
3419 return 0;
3420 }
3421 return ret_val;
3422 }
3423
3424 void PSParallelCompact::reset_millis_since_last_gc() {
3425 _time_of_last_gc = os::javaTimeMillis();
3426 }
3427
3428 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3429 {
3430 if (source() != destination()) {
3431 assert(source() > destination(), "must copy to the left");
3432 Copy::aligned_conjoint_words(source(), destination(), words_remaining());
3433 }
3434 update_state(words_remaining());
3435 assert(is_full(), "sanity");
3436 return ParMarkBitMap::full;
3437 }
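
// A minimal sketch of why a leftward overlapping copy is safe when performed
// front to back, using plain word arrays (illustrative only; the real code
// relies on Copy::aligned_conjoint_words for this): because dst is below src,
// each source word is read before any write can reach it.
static void example_copy_left(size_t* dst, const size_t* src, size_t word_count) {
  // Precondition mirrored from the assert above: dst < src; the ranges may overlap.
  for (size_t i = 0; i < word_count; ++i) {
    dst[i] = src[i];   // src[i] is intact: all writes so far landed below src[i]
  }
}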
3438
3439 void MoveAndUpdateClosure::copy_partial_obj()
3440 {
3441 size_t words = words_remaining();
3442
3443 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3444 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3445 if (end_addr < range_end) {
3446 words = bitmap()->obj_size(source(), end_addr);
3447 }
3448
3449 // This test is necessary; if omitted, the pointer updates to a partial object
3450 // that crosses the dense prefix boundary could be overwritten.
3451 if (source() != destination()) {
3452 assert(source() > destination(), "must copy to the left");
3453 Copy::aligned_conjoint_words(source(), destination(), words);
3454 }
3455 update_state(words);
3456 }
3457
3458 ParMarkBitMapClosure::IterationStatus
3459 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3460 assert(destination() != NULL, "sanity");
3461 assert(bitmap()->obj_size(addr) == words, "bad size");
3462
3463 _source = addr;
3464 assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
3465 destination(), "wrong destination");
3466
3467 if (words > words_remaining()) {
3468 return ParMarkBitMap::would_overflow;
3469 }
3470
3471 // The start_array must be updated even if the object is not moving.
3472 if (_start_array != NULL) {
3473 _start_array->allocate_block(destination());
3474 }
3475
3476 if (destination() != source()) {
3477 assert(destination() < source(), "must copy to the left");
3478 Copy::aligned_conjoint_words(source(), destination(), words);
3479 }
3480
3481 oop moved_oop = (oop) destination();
3482 moved_oop->update_contents(compaction_manager());
3483 assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
3484
3485 update_state(words);
3486 assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
3487 return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
3488 }
3489
3490 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
3491 ParCompactionManager* cm,
3492 PSParallelCompact::SpaceId space_id) :
3493 ParMarkBitMapClosure(mbm, cm),
3494 _space_id(space_id),
3495 _start_array(PSParallelCompact::start_array(space_id))
3496 {
3497 }
3498
3499 // Updates the references in the object to their new values.
3500 ParMarkBitMapClosure::IterationStatus
3501 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
3502 do_addr(addr);
3503 return ParMarkBitMap::incomplete;
3504 }
3505
3506 BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm,
3507 ParCompactionManager* cm,
3508 size_t chunk_index) :
3509 ParMarkBitMapClosure(mbm, cm),
3510 _live_data_left(0),
3511 _cur_block(0) {
3512 _chunk_start =
3513 PSParallelCompact::summary_data().chunk_to_addr(chunk_index);
3514 _chunk_end =
3515 PSParallelCompact::summary_data().chunk_to_addr(chunk_index) +
3516 ParallelCompactData::ChunkSize;
3517 _chunk_index = chunk_index;
3518 _cur_block =
3519 PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start);
3520 }
3521
3522 bool BitBlockUpdateClosure::chunk_contains_cur_block() {
3523 return ParallelCompactData::chunk_contains_block(_chunk_index, _cur_block);
3524 }
3525
3526 void BitBlockUpdateClosure::reset_chunk(size_t chunk_index) {
3527 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);)
3528 ParallelCompactData& sd = PSParallelCompact::summary_data();
3529 _chunk_index = chunk_index;
3530 _live_data_left = 0;
3531 _chunk_start = sd.chunk_to_addr(chunk_index);
3532 _chunk_end = sd.chunk_to_addr(chunk_index) + ParallelCompactData::ChunkSize;
3533
3534 // The first block in this chunk
3535 size_t first_block = sd.addr_to_block_idx(_chunk_start);
3536 size_t partial_live_size = sd.chunk(chunk_index)->partial_obj_size();
3537
3538 // Set the offset to 0. By definition it should have that value
3539 // but it may have been written while processing an earlier chunk.
3540 if (partial_live_size == 0) {
3541 // No live object extends onto the chunk. The first bit
3542 // in the bit map for this chunk must be a start bit.
3543 // Although there may not be any marked bits, it is safe
3544 // to set it as a start bit.
3545 sd.block(first_block)->set_start_bit_offset(0);
3546 sd.block(first_block)->set_first_is_start_bit(true);
3547 } else if (sd.partial_obj_ends_in_block(first_block)) {
3548 sd.block(first_block)->set_end_bit_offset(0);
3549 sd.block(first_block)->set_first_is_start_bit(false);
3550 } else {
3551 // The partial object extends beyond the first block.
3552 // There is no object starting in the first block
3553 // so the offset and bit parity are not needed.
3554 // Set the bit parity to an end bit so assertions
3555 // work when no bit is found.
3556 sd.block(first_block)->set_end_bit_offset(0);
3557 sd.block(first_block)->set_first_is_start_bit(false);
3558 }
3559 _cur_block = first_block;
3560 #ifdef ASSERT
3561 if (sd.block(first_block)->first_is_start_bit()) {
3562 assert(!sd.partial_obj_ends_in_block(first_block),
3563 "Partial object cannot end in first block");
3564 }
3565
3566 if (PrintGCDetails && Verbose) {
3567 if (partial_live_size == 1) {
3568 gclog_or_tty->print_cr("first_block " PTR_FORMAT
3569 " _offset " PTR_FORMAT
3570 " _first_is_start_bit %d",
3571 first_block,
3572 sd.block(first_block)->raw_offset(),
3573 sd.block(first_block)->first_is_start_bit());
3574 }
3575 }
3576 #endif
3577 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(17);)
3578 }
3579
3580 // This method is called when an object has been found (both beginning
3581 // and end of the object) in the range of iteration. It calculates
3582 // the words of live data to the left of a block. That live
3583 // data includes any object starting to the left of the block (i.e.,
3584 // the live-data-to-the-left of block AAA will include the full size
3585 // of any object entering AAA).
3586
3587 ParMarkBitMapClosure::IterationStatus
3588 BitBlockUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3589 // add the size to the block data.
3590 HeapWord* obj = addr;
3591 ParallelCompactData& sd = PSParallelCompact::summary_data();
3592
3593 assert(bitmap()->obj_size(obj) == words, "bad size");
3594 assert(_chunk_start <= obj, "object is not in chunk");
3595 assert(obj + words <= _chunk_end, "object is not in chunk");
3596
3597 // Update the live data to the left
3598 size_t prev_live_data_left = _live_data_left;
3599 _live_data_left = _live_data_left + words;
3600
3601 // Is this object in the current block?
3602 size_t block_of_obj = sd.addr_to_block_idx(obj);
3603 size_t block_of_obj_last = sd.addr_to_block_idx(obj + words - 1);
3604 HeapWord* block_of_obj_last_addr = sd.block_to_addr(block_of_obj_last);
3605 if (_cur_block < block_of_obj) {
3606
3607 //
3608 // No object crossed the block boundary and this object was found
3609 // on the other side of the block boundary. Update the offset for
3610 // the new block with the data size that does not include this object.
3611 //
3612 // The first bit in block_of_obj is a start bit except in the
3613 // case where the partial object for the chunk extends into
3614 // this block.
3615 if (sd.partial_obj_ends_in_block(block_of_obj)) {
3616 sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left);
3617 } else {
3618 sd.block(block_of_obj)->set_start_bit_offset(prev_live_data_left);
3619 }
3620
3621 // Does this object pass beyond its block?
3622 if (block_of_obj < block_of_obj_last) {
3623 // Object crosses block boundary. Two blocks need to be updated:
3624 // the current block where the object started
3625 // the block where the object ends
3626 //
3627 // The offset for blocks with no objects starting in them
3628 // (e.g., blocks between _cur_block and block_of_obj_last)
3629 // should not be needed.
3630 // Note that block_of_obj_last may be in another chunk. If so,
3631 // it should be overwritten later. This is a problem (writing
3632 // into a block in a later chunk) for parallel execution.
3633 assert(obj < block_of_obj_last_addr,
3634 "Object should start in previous block");
3635
3636 // obj is crossing into block_of_obj_last so the first bit
3637 // is an end bit.
3638 sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left);
3639
3640 _cur_block = block_of_obj_last;
3641 } else {
3642 // _first_is_start_bit has already been set correctly
3643 // in the if-then-else above so don't reset it here.
3644 _cur_block = block_of_obj;
3645 }
3646 } else {
3647 // The current block only changes if the object extends beyond
3648 // the block it starts in.
3649 //
3650 // The object starts in the current block.
3651 // Does this object pass beyond the end of it?
3652 if (block_of_obj < block_of_obj_last) {
3653 // Object crosses block boundary.
3654 // See note above on possible blocks between block_of_obj and
3655 // block_of_obj_last
3656 assert(obj < block_of_obj_last_addr,
3657 "Object should start in previous block");
3658
3659 sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left);
3660
3661 _cur_block = block_of_obj_last;
3662 }
3663 }
3664
3665 // Return incomplete if there are more blocks to be done.
3666 if (chunk_contains_cur_block()) {
3667 return ParMarkBitMap::incomplete;
3668 }
3669 return ParMarkBitMap::complete;
3670 }
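
// A minimal, self-contained sketch of the "live data to the left" bookkeeping
// over plain arrays (illustrative names only): an object contributes its full
// size to every block to the right of the block it starts in, matching the
// comment above about objects that merely enter a block.
static void example_live_data_left(const size_t* obj_start_block,   // block each object starts in
                                   const size_t* obj_size_words,    // size of each object in words
                                   size_t obj_count,
                                   size_t block_count,
                                   size_t* live_left_out) {         // one value per block
  // First accumulate, per block, the words of objects starting in that block.
  for (size_t b = 0; b < block_count; ++b) {
    live_left_out[b] = 0;
  }
  for (size_t i = 0; i < obj_count; ++i) {
    live_left_out[obj_start_block[i]] += obj_size_words[i];
  }
  // Then convert to an exclusive running total: live words from objects that
  // start strictly to the left of each block.
  size_t running = 0;
  for (size_t b = 0; b < block_count; ++b) {
    size_t started_here = live_left_out[b];
    live_left_out[b] = running;
    running += started_here;
  }
}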
3671
3672 // Verify the new location using the forwarding pointer
3673 // from MarkSweep::mark_sweep_phase2(). Set the mark_word
3674 // to the initial value.
3675 ParMarkBitMapClosure::IterationStatus
3676 PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3677 // The second arg (words) is not used.
3678 oop obj = (oop) addr;
3679 HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer();
3680 HeapWord* new_pointer = summary_data().calc_new_pointer(obj);
3681 if (forwarding_ptr == NULL) {
3682 // The object is dead or not moving.
3683 assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
3684 "Object liveness is wrong.");
3685 return ParMarkBitMap::incomplete;
3686 }
3687 assert(UseParallelOldGCDensePrefix ||
3688 (HeapMaximumCompactionInterval > 1) ||
3689 (MarkSweepAlwaysCompactCount > 1) ||
3690 (forwarding_ptr == new_pointer),
3691 "Calculation of new location is incorrect");
3692 return ParMarkBitMap::incomplete;
3693 }
3694
3695 // Reset objects modified for debug checking.
3696 ParMarkBitMapClosure::IterationStatus
3697 PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) {
3698 // The second arg (words) is not used.
3699 oop obj = (oop) addr;
3700 obj->init_mark();
3701 return ParMarkBitMap::incomplete;
3702 }
3703
3704 // Prepare for compaction. This method is executed once
3705 // (i.e., by a single thread) before compaction.
3706 // Save the updated location of the intArrayKlassObj for
3707 // filling holes in the dense prefix.
3708 void PSParallelCompact::compact_prologue() {
3709 _updated_int_array_klass_obj = (klassOop)
3710 summary_data().calc_new_pointer(Universe::intArrayKlassObj());
3711 }
3712
3713 // The initial implementation of this method created a field
3714 // _next_compaction_space_id in SpaceInfo and initialized
3715 // that field in SpaceInfo::initialize_space_info(). That
3716 // required that _next_compaction_space_id be declared a
3717 // SpaceId in SpaceInfo and that would have required that
3718 // either SpaceId be declared in a separate class or that
3719 // it be declared in SpaceInfo. It didn't seem consistent
3720 // to declare it in SpaceInfo (didn't really fit logically).
3721 // Alternatively, defining a separate class to define SpaceId
3722 // seemed excessive. This implementation is simple and localizes
3723 // the knowledge.
3724
3725 PSParallelCompact::SpaceId
3726 PSParallelCompact::next_compaction_space_id(SpaceId id) {
3727 assert(id < last_space_id, "id out of range");
3728 switch (id) {
3729 case perm_space_id :
3730 return last_space_id;
3731 case old_space_id :
3732 return eden_space_id;
3733 case eden_space_id :
3734 return from_space_id;
3735 case from_space_id :
3736 return to_space_id;
3737 case to_space_id :
3738 return last_space_id;
3739 default:
3740 assert(false, "Bad space id");
3741 return last_space_id;
3742 }
3743 }
3744
3745 // Here temporarily for debugging
3746 #ifdef ASSERT
3747 size_t ParallelCompactData::block_idx(BlockData* block) {
3748 size_t index = pointer_delta(block,
3749 PSParallelCompact::summary_data()._block_data, sizeof(BlockData));
3750 return index;
3751 }
3752 #endif