comparison src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp @ 375:81cd571500b0

6725697: par compact - rename class ChunkData to RegionData
Reviewed-by: iveresov, tonyp
author jcoomes
date Tue, 30 Sep 2008 12:20:22 -0700
parents a4b729f5b611
children 0166ac265d53
374:a4b729f5b611 (parent)    375:81cd571500b0 (this changeset)
26 #include "incls/_psParallelCompact.cpp.incl" 26 #include "incls/_psParallelCompact.cpp.incl"
27 27
28 #include <math.h> 28 #include <math.h>
29 29
30 // All sizes are in HeapWords. 30 // All sizes are in HeapWords.
31 const size_t ParallelCompactData::Log2ChunkSize = 9; // 512 words 31 const size_t ParallelCompactData::Log2RegionSize = 9; // 512 words
32 const size_t ParallelCompactData::ChunkSize = (size_t)1 << Log2ChunkSize; 32 const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize;
33 const size_t ParallelCompactData::ChunkSizeBytes = ChunkSize << LogHeapWordSize; 33 const size_t ParallelCompactData::RegionSizeBytes =
34 const size_t ParallelCompactData::ChunkSizeOffsetMask = ChunkSize - 1; 34 RegionSize << LogHeapWordSize;
35 const size_t ParallelCompactData::ChunkAddrOffsetMask = ChunkSizeBytes - 1; 35 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
36 const size_t ParallelCompactData::ChunkAddrMask = ~ChunkAddrOffsetMask; 36 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
37 const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;
37 38
38 // 32-bit: 128 words covers 4 bitmap words 39 // 32-bit: 128 words covers 4 bitmap words
39 // 64-bit: 128 words covers 2 bitmap words 40 // 64-bit: 128 words covers 2 bitmap words
40 const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words 41 const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words
41 const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize; 42 const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize;
42 const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1; 43 const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1;
43 const size_t ParallelCompactData::BlockMask = ~BlockOffsetMask; 44 const size_t ParallelCompactData::BlockMask = ~BlockOffsetMask;
44 45
45 const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize; 46 const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
46 47
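The constants above fix the region geometry. As a standalone sketch (illustrative only, not the HotSpot accessors), this is how a power-of-two region size and its offset mask are typically combined to split a word offset from _region_start into a region index and an in-region offset:

#include <cstdio>
#include <cstddef>

int main() {
  const size_t Log2RegionSize       = 9;                        // 512 words
  const size_t RegionSize           = (size_t)1 << Log2RegionSize;
  const size_t RegionSizeOffsetMask = RegionSize - 1;

  const size_t word_ofs = 1300;     // e.g. pointer_delta(addr, _region_start)
  const size_t region   = word_ofs >> Log2RegionSize;           // 2
  const size_t offset   = word_ofs & RegionSizeOffsetMask;      // 276
  printf("region %zu, offset %zu\n", region, offset);
  return 0;
}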
47 const ParallelCompactData::ChunkData::chunk_sz_t 48 const ParallelCompactData::RegionData::region_sz_t
48 ParallelCompactData::ChunkData::dc_shift = 27; 49 ParallelCompactData::RegionData::dc_shift = 27;
49 50
50 const ParallelCompactData::ChunkData::chunk_sz_t 51 const ParallelCompactData::RegionData::region_sz_t
51 ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift; 52 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
52 53
53 const ParallelCompactData::ChunkData::chunk_sz_t 54 const ParallelCompactData::RegionData::region_sz_t
54 ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift; 55 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
55 56
56 const ParallelCompactData::ChunkData::chunk_sz_t 57 const ParallelCompactData::RegionData::region_sz_t
57 ParallelCompactData::ChunkData::los_mask = ~dc_mask; 58 ParallelCompactData::RegionData::los_mask = ~dc_mask;
58 59
59 const ParallelCompactData::ChunkData::chunk_sz_t 60 const ParallelCompactData::RegionData::region_sz_t
60 ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift; 61 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
61 62
62 const ParallelCompactData::ChunkData::chunk_sz_t 63 const ParallelCompactData::RegionData::region_sz_t
63 ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift; 64 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
64 65
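A rough sketch of how the dc_* constants above partition a single value, assuming region_sz_t is a 32-bit unsigned type (consistent with the ~0U arithmetic). Bits at and above dc_shift carry the destination count plus the claimed/completed state, and los_mask selects the low bits used for sizes. Illustrative code only:

#include <cstdio>
#include <cstdint>

typedef uint32_t region_sz_t;                        // assumed width
static const region_sz_t dc_shift     = 27;
static const region_sz_t dc_mask      = ~(region_sz_t)0 << dc_shift;
static const region_sz_t dc_one       = (region_sz_t)0x1 << dc_shift;
static const region_sz_t dc_claimed   = (region_sz_t)0x8 << dc_shift;
static const region_sz_t dc_completed = (region_sz_t)0xc << dc_shift;
static const region_sz_t los_mask     = ~dc_mask;

int main() {
  region_sz_t v = 1000;                              // live-obj size in the low bits
  v += dc_one + dc_one;                              // destination count of 2
  printf("count=%u size=%u claimed=%d completed=%d\n",
         (unsigned)((v & dc_mask) >> dc_shift), (unsigned)(v & los_mask),
         (v & dc_mask) == dc_claimed, (v & dc_mask) == dc_completed);
  return 0;
}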
65 #ifdef ASSERT 66 #ifdef ASSERT
66 short ParallelCompactData::BlockData::_cur_phase = 0; 67 short ParallelCompactData::BlockData::_cur_phase = 0;
67 #endif 68 #endif
68 69
103 #ifndef PRODUCT 104 #ifndef PRODUCT
104 const char* PSParallelCompact::space_names[] = { 105 const char* PSParallelCompact::space_names[] = {
105 "perm", "old ", "eden", "from", "to " 106 "perm", "old ", "eden", "from", "to "
106 }; 107 };
107 108
108 void PSParallelCompact::print_chunk_ranges() 109 void PSParallelCompact::print_region_ranges()
109 { 110 {
110 tty->print_cr("space bottom top end new_top"); 111 tty->print_cr("space bottom top end new_top");
111 tty->print_cr("------ ---------- ---------- ---------- ----------"); 112 tty->print_cr("------ ---------- ---------- ---------- ----------");
112 113
113 for (unsigned int id = 0; id < last_space_id; ++id) { 114 for (unsigned int id = 0; id < last_space_id; ++id) {
114 const MutableSpace* space = _space_info[id].space(); 115 const MutableSpace* space = _space_info[id].space();
115 tty->print_cr("%u %s " 116 tty->print_cr("%u %s "
116 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " " 117 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
117 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ", 118 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
118 id, space_names[id], 119 id, space_names[id],
119 summary_data().addr_to_chunk_idx(space->bottom()), 120 summary_data().addr_to_region_idx(space->bottom()),
120 summary_data().addr_to_chunk_idx(space->top()), 121 summary_data().addr_to_region_idx(space->top()),
121 summary_data().addr_to_chunk_idx(space->end()), 122 summary_data().addr_to_region_idx(space->end()),
122 summary_data().addr_to_chunk_idx(_space_info[id].new_top())); 123 summary_data().addr_to_region_idx(_space_info[id].new_top()));
123 } 124 }
124 } 125 }
125 126
126 void 127 void
127 print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c) 128 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
128 { 129 {
129 #define CHUNK_IDX_FORMAT SIZE_FORMAT_W(7) 130 #define REGION_IDX_FORMAT SIZE_FORMAT_W(7)
130 #define CHUNK_DATA_FORMAT SIZE_FORMAT_W(5) 131 #define REGION_DATA_FORMAT SIZE_FORMAT_W(5)
131 132
132 ParallelCompactData& sd = PSParallelCompact::summary_data(); 133 ParallelCompactData& sd = PSParallelCompact::summary_data();
133 size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0; 134 size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
134 tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " " 135 tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
135 CHUNK_IDX_FORMAT " " PTR_FORMAT " " 136 REGION_IDX_FORMAT " " PTR_FORMAT " "
136 CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " " 137 REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
137 CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d", 138 REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
138 i, c->data_location(), dci, c->destination(), 139 i, c->data_location(), dci, c->destination(),
139 c->partial_obj_size(), c->live_obj_size(), 140 c->partial_obj_size(), c->live_obj_size(),
140 c->data_size(), c->source_chunk(), c->destination_count()); 141 c->data_size(), c->source_region(), c->destination_count());
141 142
142 #undef CHUNK_IDX_FORMAT 143 #undef REGION_IDX_FORMAT
143 #undef CHUNK_DATA_FORMAT 144 #undef REGION_DATA_FORMAT
144 } 145 }
145 146
146 void 147 void
147 print_generic_summary_data(ParallelCompactData& summary_data, 148 print_generic_summary_data(ParallelCompactData& summary_data,
148 HeapWord* const beg_addr, 149 HeapWord* const beg_addr,
149 HeapWord* const end_addr) 150 HeapWord* const end_addr)
150 { 151 {
151 size_t total_words = 0; 152 size_t total_words = 0;
152 size_t i = summary_data.addr_to_chunk_idx(beg_addr); 153 size_t i = summary_data.addr_to_region_idx(beg_addr);
153 const size_t last = summary_data.addr_to_chunk_idx(end_addr); 154 const size_t last = summary_data.addr_to_region_idx(end_addr);
154 HeapWord* pdest = 0; 155 HeapWord* pdest = 0;
155 156
156 while (i <= last) { 157 while (i <= last) {
157 ParallelCompactData::ChunkData* c = summary_data.chunk(i); 158 ParallelCompactData::RegionData* c = summary_data.region(i);
158 if (c->data_size() != 0 || c->destination() != pdest) { 159 if (c->data_size() != 0 || c->destination() != pdest) {
159 print_generic_summary_chunk(i, c); 160 print_generic_summary_region(i, c);
160 total_words += c->data_size(); 161 total_words += c->data_size();
161 pdest = c->destination(); 162 pdest = c->destination();
162 } 163 }
163 ++i; 164 ++i;
164 } 165 }
176 MAX2(space->top(), space_info[id].new_top())); 177 MAX2(space->top(), space_info[id].new_top()));
177 } 178 }
178 } 179 }
179 180
180 void 181 void
181 print_initial_summary_chunk(size_t i, 182 print_initial_summary_region(size_t i,
182 const ParallelCompactData::ChunkData* c, 183 const ParallelCompactData::RegionData* c,
183 bool newline = true) 184 bool newline = true)
184 { 185 {
185 tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " " 186 tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
186 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " 187 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
187 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d", 188 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
188 i, c->destination(), 189 i, c->destination(),
189 c->partial_obj_size(), c->live_obj_size(), 190 c->partial_obj_size(), c->live_obj_size(),
190 c->data_size(), c->source_chunk(), c->destination_count()); 191 c->data_size(), c->source_region(), c->destination_count());
191 if (newline) tty->cr(); 192 if (newline) tty->cr();
192 } 193 }
193 194
194 void 195 void
195 print_initial_summary_data(ParallelCompactData& summary_data, 196 print_initial_summary_data(ParallelCompactData& summary_data,
196 const MutableSpace* space) { 197 const MutableSpace* space) {
197 if (space->top() == space->bottom()) { 198 if (space->top() == space->bottom()) {
198 return; 199 return;
199 } 200 }
200 201
201 const size_t chunk_size = ParallelCompactData::ChunkSize; 202 const size_t region_size = ParallelCompactData::RegionSize;
202 HeapWord* const top_aligned_up = summary_data.chunk_align_up(space->top()); 203 typedef ParallelCompactData::RegionData RegionData;
203 const size_t end_chunk = summary_data.addr_to_chunk_idx(top_aligned_up); 204 HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
204 const ParallelCompactData::ChunkData* c = summary_data.chunk(end_chunk - 1); 205 const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
206 const RegionData* c = summary_data.region(end_region - 1);
205 HeapWord* end_addr = c->destination() + c->data_size(); 207 HeapWord* end_addr = c->destination() + c->data_size();
206 const size_t live_in_space = pointer_delta(end_addr, space->bottom()); 208 const size_t live_in_space = pointer_delta(end_addr, space->bottom());
207 209
208 // Print (and count) the full chunks at the beginning of the space. 210 // Print (and count) the full regions at the beginning of the space.
209 size_t full_chunk_count = 0; 211 size_t full_region_count = 0;
210 size_t i = summary_data.addr_to_chunk_idx(space->bottom()); 212 size_t i = summary_data.addr_to_region_idx(space->bottom());
211 while (i < end_chunk && summary_data.chunk(i)->data_size() == chunk_size) { 213 while (i < end_region && summary_data.region(i)->data_size() == region_size) {
212 print_initial_summary_chunk(i, summary_data.chunk(i)); 214 print_initial_summary_region(i, summary_data.region(i));
213 ++full_chunk_count; 215 ++full_region_count;
214 ++i; 216 ++i;
215 } 217 }
216 218
217 size_t live_to_right = live_in_space - full_chunk_count * chunk_size; 219 size_t live_to_right = live_in_space - full_region_count * region_size;
218 220
219 double max_reclaimed_ratio = 0.0; 221 double max_reclaimed_ratio = 0.0;
220 size_t max_reclaimed_ratio_chunk = 0; 222 size_t max_reclaimed_ratio_region = 0;
221 size_t max_dead_to_right = 0; 223 size_t max_dead_to_right = 0;
222 size_t max_live_to_right = 0; 224 size_t max_live_to_right = 0;
223 225
224 // Print the 'reclaimed ratio' for chunks while there is something live in the 226 // Print the 'reclaimed ratio' for regions while there is something live in
225 // chunk or to the right of it. The remaining chunks are empty (and 227 // the region or to the right of it. The remaining regions are empty (and
226 // uninteresting), and computing the ratio will result in division by 0. 228 // uninteresting), and computing the ratio will result in division by 0.
227 while (i < end_chunk && live_to_right > 0) { 229 while (i < end_region && live_to_right > 0) {
228 c = summary_data.chunk(i); 230 c = summary_data.region(i);
229 HeapWord* const chunk_addr = summary_data.chunk_to_addr(i); 231 HeapWord* const region_addr = summary_data.region_to_addr(i);
230 const size_t used_to_right = pointer_delta(space->top(), chunk_addr); 232 const size_t used_to_right = pointer_delta(space->top(), region_addr);
231 const size_t dead_to_right = used_to_right - live_to_right; 233 const size_t dead_to_right = used_to_right - live_to_right;
232 const double reclaimed_ratio = double(dead_to_right) / live_to_right; 234 const double reclaimed_ratio = double(dead_to_right) / live_to_right;
233 235
234 if (reclaimed_ratio > max_reclaimed_ratio) { 236 if (reclaimed_ratio > max_reclaimed_ratio) {
235 max_reclaimed_ratio = reclaimed_ratio; 237 max_reclaimed_ratio = reclaimed_ratio;
236 max_reclaimed_ratio_chunk = i; 238 max_reclaimed_ratio_region = i;
237 max_dead_to_right = dead_to_right; 239 max_dead_to_right = dead_to_right;
238 max_live_to_right = live_to_right; 240 max_live_to_right = live_to_right;
239 } 241 }
240 242
241 print_initial_summary_chunk(i, c, false); 243 print_initial_summary_region(i, c, false);
242 tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10), 244 tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
243 reclaimed_ratio, dead_to_right, live_to_right); 245 reclaimed_ratio, dead_to_right, live_to_right);
244 246
245 live_to_right -= c->data_size(); 247 live_to_right -= c->data_size();
246 ++i; 248 ++i;
247 } 249 }
248 250
249 // Any remaining chunks are empty. Print one more if there is one. 251 // Any remaining regions are empty. Print one more if there is one.
250 if (i < end_chunk) { 252 if (i < end_region) {
251 print_initial_summary_chunk(i, summary_data.chunk(i)); 253 print_initial_summary_region(i, summary_data.region(i));
252 } 254 }
253 255
254 tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " " 256 tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
255 "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f", 257 "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
256 max_reclaimed_ratio_chunk, max_dead_to_right, 258 max_reclaimed_ratio_region, max_dead_to_right,
257 max_live_to_right, max_reclaimed_ratio); 259 max_live_to_right, max_reclaimed_ratio);
258 } 260 }
259 261
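A worked example (numbers are illustrative) of the ratio printed by the loop above: everything used to the right of a candidate region that is not live is dead, and dead/live measures the payoff of compacting from that point on.

#include <cstdio>
int main() {
  const double used_to_right = 10000;   // words between the region and space->top()
  const double live_to_right = 2000;    // live words in that range
  const double dead_to_right = used_to_right - live_to_right;        // 8000
  printf("reclaimed ratio = %.2f\n", dead_to_right / live_to_right); // 4.00
  return 0;
}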
260 void 262 void
261 print_initial_summary_data(ParallelCompactData& summary_data, 263 print_initial_summary_data(ParallelCompactData& summary_data,
283 285
284 ParallelCompactData::ParallelCompactData() 286 ParallelCompactData::ParallelCompactData()
285 { 287 {
286 _region_start = 0; 288 _region_start = 0;
287 289
288 _chunk_vspace = 0; 290 _region_vspace = 0;
289 _chunk_data = 0; 291 _region_data = 0;
290 _chunk_count = 0; 292 _region_count = 0;
291 293
292 _block_vspace = 0; 294 _block_vspace = 0;
293 _block_data = 0; 295 _block_data = 0;
294 _block_count = 0; 296 _block_count = 0;
295 } 297 }
298 { 300 {
299 _region_start = covered_region.start(); 301 _region_start = covered_region.start();
300 const size_t region_size = covered_region.word_size(); 302 const size_t region_size = covered_region.word_size();
301 DEBUG_ONLY(_region_end = _region_start + region_size;) 303 DEBUG_ONLY(_region_end = _region_start + region_size;)
302 304
303 assert(chunk_align_down(_region_start) == _region_start, 305 assert(region_align_down(_region_start) == _region_start,
304 "region start not aligned"); 306 "region start not aligned");
305 assert((region_size & ChunkSizeOffsetMask) == 0, 307 assert((region_size & RegionSizeOffsetMask) == 0,
306 "region size not a multiple of ChunkSize"); 308 "region size not a multiple of RegionSize");
307 309
308 bool result = initialize_chunk_data(region_size); 310 bool result = initialize_region_data(region_size);
309 311
310 // Initialize the block data if it will be used for updating pointers, or if 312 // Initialize the block data if it will be used for updating pointers, or if
311 // this is a debug build. 313 // this is a debug build.
312 if (!UseParallelOldGCChunkPointerCalc || trueInDebug) { 314 if (!UseParallelOldGCRegionPointerCalc || trueInDebug) {
313 result = result && initialize_block_data(region_size); 315 result = result && initialize_block_data(region_size);
314 } 316 }
315 317
316 return result; 318 return result;
317 } 319 }
340 } 342 }
341 343
342 return 0; 344 return 0;
343 } 345 }
344 346
345 bool ParallelCompactData::initialize_chunk_data(size_t region_size) 347 bool ParallelCompactData::initialize_region_data(size_t region_size)
346 { 348 {
347 const size_t count = (region_size + ChunkSizeOffsetMask) >> Log2ChunkSize; 349 const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
348 _chunk_vspace = create_vspace(count, sizeof(ChunkData)); 350 _region_vspace = create_vspace(count, sizeof(RegionData));
349 if (_chunk_vspace != 0) { 351 if (_region_vspace != 0) {
350 _chunk_data = (ChunkData*)_chunk_vspace->reserved_low_addr(); 352 _region_data = (RegionData*)_region_vspace->reserved_low_addr();
351 _chunk_count = count; 353 _region_count = count;
352 return true; 354 return true;
353 } 355 }
354 return false; 356 return false;
355 } 357 }
356 358
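The count computed in initialize_region_data() above uses a standard rounding idiom: adding RegionSizeOffsetMask before shifting by Log2RegionSize gives the number of regions needed to cover region_size words, rounded up, without a division. A minimal sketch:

#include <cstdio>
#include <cstddef>
int main() {
  const size_t Log2RegionSize       = 9;
  const size_t RegionSize           = (size_t)1 << Log2RegionSize;   // 512 words
  const size_t RegionSizeOffsetMask = RegionSize - 1;
  const size_t region_size = 1000;                                   // example size in words
  const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
  printf("%zu words -> %zu regions\n", region_size, count);          // 1000 -> 2
  return 0;
}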
369 void ParallelCompactData::clear() 371 void ParallelCompactData::clear()
370 { 372 {
371 if (_block_data) { 373 if (_block_data) {
372 memset(_block_data, 0, _block_vspace->committed_size()); 374 memset(_block_data, 0, _block_vspace->committed_size());
373 } 375 }
374 memset(_chunk_data, 0, _chunk_vspace->committed_size()); 376 memset(_region_data, 0, _region_vspace->committed_size());
375 } 377 }
376 378
377 void ParallelCompactData::clear_range(size_t beg_chunk, size_t end_chunk) { 379 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
378 assert(beg_chunk <= _chunk_count, "beg_chunk out of range"); 380 assert(beg_region <= _region_count, "beg_region out of range");
379 assert(end_chunk <= _chunk_count, "end_chunk out of range"); 381 assert(end_region <= _region_count, "end_region out of range");
380 assert(ChunkSize % BlockSize == 0, "ChunkSize not a multiple of BlockSize"); 382 assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");
381 383
382 const size_t chunk_cnt = end_chunk - beg_chunk; 384 const size_t region_cnt = end_region - beg_region;
383 385
384 if (_block_data) { 386 if (_block_data) {
385 const size_t blocks_per_chunk = ChunkSize / BlockSize; 387 const size_t blocks_per_region = RegionSize / BlockSize;
386 const size_t beg_block = beg_chunk * blocks_per_chunk; 388 const size_t beg_block = beg_region * blocks_per_region;
387 const size_t block_cnt = chunk_cnt * blocks_per_chunk; 389 const size_t block_cnt = region_cnt * blocks_per_region;
388 memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData)); 390 memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
389 } 391 }
390 memset(_chunk_data + beg_chunk, 0, chunk_cnt * sizeof(ChunkData)); 392 memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
391 } 393 }
392 394
393 HeapWord* ParallelCompactData::partial_obj_end(size_t chunk_idx) const 395 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
394 { 396 {
395 const ChunkData* cur_cp = chunk(chunk_idx); 397 const RegionData* cur_cp = region(region_idx);
396 const ChunkData* const end_cp = chunk(chunk_count() - 1); 398 const RegionData* const end_cp = region(region_count() - 1);
397 399
398 HeapWord* result = chunk_to_addr(chunk_idx); 400 HeapWord* result = region_to_addr(region_idx);
399 if (cur_cp < end_cp) { 401 if (cur_cp < end_cp) {
400 do { 402 do {
401 result += cur_cp->partial_obj_size(); 403 result += cur_cp->partial_obj_size();
402 } while (cur_cp->partial_obj_size() == ChunkSize && ++cur_cp < end_cp); 404 } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
403 } 405 }
404 return result; 406 return result;
405 } 407 }
406 408
407 void ParallelCompactData::add_obj(HeapWord* addr, size_t len) 409 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
408 { 410 {
409 const size_t obj_ofs = pointer_delta(addr, _region_start); 411 const size_t obj_ofs = pointer_delta(addr, _region_start);
410 const size_t beg_chunk = obj_ofs >> Log2ChunkSize; 412 const size_t beg_region = obj_ofs >> Log2RegionSize;
411 const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize; 413 const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
412 414
413 DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);) 415 DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
414 DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);) 416 DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
415 417
416 if (beg_chunk == end_chunk) { 418 if (beg_region == end_region) {
417 // All in one chunk. 419 // All in one region.
418 _chunk_data[beg_chunk].add_live_obj(len); 420 _region_data[beg_region].add_live_obj(len);
419 return; 421 return;
420 } 422 }
421 423
422 // First chunk. 424 // First region.
423 const size_t beg_ofs = chunk_offset(addr); 425 const size_t beg_ofs = region_offset(addr);
424 _chunk_data[beg_chunk].add_live_obj(ChunkSize - beg_ofs); 426 _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
425 427
426 klassOop klass = ((oop)addr)->klass(); 428 klassOop klass = ((oop)addr)->klass();
427 // Middle chunks--completely spanned by this object. 429 // Middle regions--completely spanned by this object.
428 for (size_t chunk = beg_chunk + 1; chunk < end_chunk; ++chunk) { 430 for (size_t region = beg_region + 1; region < end_region; ++region) {
429 _chunk_data[chunk].set_partial_obj_size(ChunkSize); 431 _region_data[region].set_partial_obj_size(RegionSize);
430 _chunk_data[chunk].set_partial_obj_addr(addr); 432 _region_data[region].set_partial_obj_addr(addr);
431 } 433 }
432 434
433 // Last chunk. 435 // Last region.
434 const size_t end_ofs = chunk_offset(addr + len - 1); 436 const size_t end_ofs = region_offset(addr + len - 1);
435 _chunk_data[end_chunk].set_partial_obj_size(end_ofs + 1); 437 _region_data[end_region].set_partial_obj_size(end_ofs + 1);
436 _chunk_data[end_chunk].set_partial_obj_addr(addr); 438 _region_data[end_region].set_partial_obj_addr(addr);
437 } 439 }
438 440
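A standalone sketch (illustrative offsets, not HotSpot code) of the bookkeeping add_obj() above performs when one live object spans several regions: the first region is credited with the words up to its end, fully spanned middle regions record a partial object of RegionSize, and the last region records the spill-over.

#include <cstdio>
#include <cstddef>
int main() {
  const size_t Log2RegionSize = 9, RegionSize = 512;
  const size_t obj_ofs = 700, len = 900;    // object start and length, in words
  const size_t beg_region = obj_ofs >> Log2RegionSize;                // 1
  const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;    // 3
  const size_t beg_ofs = obj_ofs & (RegionSize - 1);                  // 188
  const size_t end_ofs = (obj_ofs + len - 1) & (RegionSize - 1);      // 63
  printf("first  region %zu: live_obj += %zu\n", beg_region, RegionSize - beg_ofs);
  for (size_t r = beg_region + 1; r < end_region; ++r)
    printf("middle region %zu: partial_obj_size = %zu\n", r, RegionSize);
  printf("last   region %zu: partial_obj_size = %zu\n", end_region, end_ofs + 1);
  return 0;   // 324 + 512 + 64 words accounted for, matching len == 900
}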
439 void 441 void
440 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end) 442 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
441 { 443 {
442 assert(chunk_offset(beg) == 0, "not ChunkSize aligned"); 444 assert(region_offset(beg) == 0, "not RegionSize aligned");
443 assert(chunk_offset(end) == 0, "not ChunkSize aligned"); 445 assert(region_offset(end) == 0, "not RegionSize aligned");
444 446
445 size_t cur_chunk = addr_to_chunk_idx(beg); 447 size_t cur_region = addr_to_region_idx(beg);
446 const size_t end_chunk = addr_to_chunk_idx(end); 448 const size_t end_region = addr_to_region_idx(end);
447 HeapWord* addr = beg; 449 HeapWord* addr = beg;
448 while (cur_chunk < end_chunk) { 450 while (cur_region < end_region) {
449 _chunk_data[cur_chunk].set_destination(addr); 451 _region_data[cur_region].set_destination(addr);
450 _chunk_data[cur_chunk].set_destination_count(0); 452 _region_data[cur_region].set_destination_count(0);
451 _chunk_data[cur_chunk].set_source_chunk(cur_chunk); 453 _region_data[cur_region].set_source_region(cur_region);
452 _chunk_data[cur_chunk].set_data_location(addr); 454 _region_data[cur_region].set_data_location(addr);
453 455
454 // Update live_obj_size so the chunk appears completely full. 456 // Update live_obj_size so the region appears completely full.
455 size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size(); 457 size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
456 _chunk_data[cur_chunk].set_live_obj_size(live_size); 458 _region_data[cur_region].set_live_obj_size(live_size);
457 459
458 ++cur_chunk; 460 ++cur_region;
459 addr += ChunkSize; 461 addr += RegionSize;
460 } 462 }
461 } 463 }
462 464
463 bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end, 465 bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
464 HeapWord* source_beg, HeapWord* source_end, 466 HeapWord* source_beg, HeapWord* source_end,
465 HeapWord** target_next, 467 HeapWord** target_next,
466 HeapWord** source_next) { 468 HeapWord** source_next) {
467 // This is too strict. 469 // This is too strict.
468 // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned"); 470 // assert(region_offset(source_beg) == 0, "not RegionSize aligned");
469 471
470 if (TraceParallelOldGCSummaryPhase) { 472 if (TraceParallelOldGCSummaryPhase) {
471 tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " " 473 tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
472 "sb=" PTR_FORMAT " se=" PTR_FORMAT " " 474 "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
473 "tn=" PTR_FORMAT " sn=" PTR_FORMAT, 475 "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
475 source_beg, source_end, 477 source_beg, source_end,
476 target_next != 0 ? *target_next : (HeapWord*) 0, 478 target_next != 0 ? *target_next : (HeapWord*) 0,
477 source_next != 0 ? *source_next : (HeapWord*) 0); 479 source_next != 0 ? *source_next : (HeapWord*) 0);
478 } 480 }
479 481
480 size_t cur_chunk = addr_to_chunk_idx(source_beg); 482 size_t cur_region = addr_to_region_idx(source_beg);
481 const size_t end_chunk = addr_to_chunk_idx(chunk_align_up(source_end)); 483 const size_t end_region = addr_to_region_idx(region_align_up(source_end));
482 484
483 HeapWord *dest_addr = target_beg; 485 HeapWord *dest_addr = target_beg;
484 while (cur_chunk < end_chunk) { 486 while (cur_region < end_region) {
485 size_t words = _chunk_data[cur_chunk].data_size(); 487 size_t words = _region_data[cur_region].data_size();
486 488
487 #if 1 489 #if 1
488 assert(pointer_delta(target_end, dest_addr) >= words, 490 assert(pointer_delta(target_end, dest_addr) >= words,
489 "source region does not fit into target region"); 491 "source region does not fit into target region");
490 #else 492 #else
491 // XXX - need some work on the corner cases here. If the chunk does not 493 // XXX - need some work on the corner cases here. If the region does not
492 // fit, then must either make sure any partial_obj from the chunk fits, or 494 // fit, then must either make sure any partial_obj from the region fits, or
493 // 'undo' the initial part of the partial_obj that is in the previous chunk. 495 // "undo" the initial part of the partial_obj that is in the previous
496 // region.
494 if (dest_addr + words >= target_end) { 497 if (dest_addr + words >= target_end) {
495 // Let the caller know where to continue. 498 // Let the caller know where to continue.
496 *target_next = dest_addr; 499 *target_next = dest_addr;
497 *source_next = chunk_to_addr(cur_chunk); 500 *source_next = region_to_addr(cur_region);
498 return false; 501 return false;
499 } 502 }
500 #endif // #if 1 503 #endif // #if 1
501 504
502 _chunk_data[cur_chunk].set_destination(dest_addr); 505 _region_data[cur_region].set_destination(dest_addr);
503 506
504 // Set the destination_count for cur_chunk, and if necessary, update 507 // Set the destination_count for cur_region, and if necessary, update
505 // source_chunk for a destination chunk. The source_chunk field is updated 508 // source_region for a destination region. The source_region field is
506 // if cur_chunk is the first (left-most) chunk to be copied to a destination 509 // updated if cur_region is the first (left-most) region to be copied to a
507 // chunk. 510 // destination region.
508 // 511 //
509 // The destination_count calculation is a bit subtle. A chunk that has data 512 // The destination_count calculation is a bit subtle. A region that has
510 // that compacts into itself does not count itself as a destination. This 513 // data that compacts into itself does not count itself as a destination.
511 // maintains the invariant that a zero count means the chunk is available 514 // This maintains the invariant that a zero count means the region is
512 // and can be claimed and then filled. 515 // available and can be claimed and then filled.
513 if (words > 0) { 516 if (words > 0) {
514 HeapWord* const last_addr = dest_addr + words - 1; 517 HeapWord* const last_addr = dest_addr + words - 1;
515 const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr); 518 const size_t dest_region_1 = addr_to_region_idx(dest_addr);
516 const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr); 519 const size_t dest_region_2 = addr_to_region_idx(last_addr);
517 #if 0 520 #if 0
518 // Initially assume that the destination chunks will be the same and 521 // Initially assume that the destination regions will be the same and
519 // adjust the value below if necessary. Under this assumption, if 522 // adjust the value below if necessary. Under this assumption, if
520 // cur_chunk == dest_chunk_2, then cur_chunk will be compacted completely 523 // cur_region == dest_region_2, then cur_region will be compacted
521 // into itself. 524 // completely into itself.
522 uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1; 525 uint destination_count = cur_region == dest_region_2 ? 0 : 1;
523 if (dest_chunk_1 != dest_chunk_2) { 526 if (dest_region_1 != dest_region_2) {
524 // Destination chunks differ; adjust destination_count. 527 // Destination regions differ; adjust destination_count.
525 destination_count += 1; 528 destination_count += 1;
526 // Data from cur_chunk will be copied to the start of dest_chunk_2. 529 // Data from cur_region will be copied to the start of dest_region_2.
527 _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk); 530 _region_data[dest_region_2].set_source_region(cur_region);
528 } else if (chunk_offset(dest_addr) == 0) { 531 } else if (region_offset(dest_addr) == 0) {
529 // Data from cur_chunk will be copied to the start of the destination 532 // Data from cur_region will be copied to the start of the destination
530 // chunk. 533 // region.
531 _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk); 534 _region_data[dest_region_1].set_source_region(cur_region);
532 } 535 }
533 #else 536 #else
534 // Initially assume that the destination chunks will be different and 537 // Initially assume that the destination regions will be different and
535 // adjust the value below if necessary. Under this assumption, if 538 // adjust the value below if necessary. Under this assumption, if
536 // cur_chunk == dest_chunk2, then cur_chunk will be compacted partially 539 // cur_region == dest_region2, then cur_region will be compacted partially
537 // into dest_chunk_1 and partially into itself. 540 // into dest_region_1 and partially into itself.
538 uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2; 541 uint destination_count = cur_region == dest_region_2 ? 1 : 2;
539 if (dest_chunk_1 != dest_chunk_2) { 542 if (dest_region_1 != dest_region_2) {
540 // Data from cur_chunk will be copied to the start of dest_chunk_2. 543 // Data from cur_region will be copied to the start of dest_region_2.
541 _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk); 544 _region_data[dest_region_2].set_source_region(cur_region);
542 } else { 545 } else {
543 // Destination chunks are the same; adjust destination_count. 546 // Destination regions are the same; adjust destination_count.
544 destination_count -= 1; 547 destination_count -= 1;
545 if (chunk_offset(dest_addr) == 0) { 548 if (region_offset(dest_addr) == 0) {
546 // Data from cur_chunk will be copied to the start of the destination 549 // Data from cur_region will be copied to the start of the destination
547 // chunk. 550 // region.
548 _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk); 551 _region_data[dest_region_1].set_source_region(cur_region);
549 } 552 }
550 } 553 }
551 #endif // #if 0 554 #endif // #if 0
552 555
553 _chunk_data[cur_chunk].set_destination_count(destination_count); 556 _region_data[cur_region].set_destination_count(destination_count);
554 _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk)); 557 _region_data[cur_region].set_data_location(region_to_addr(cur_region));
555 dest_addr += words; 558 dest_addr += words;
556 } 559 }
557 560
558 ++cur_chunk; 561 ++cur_region;
559 } 562 }
560 563
561 *target_next = dest_addr; 564 *target_next = dest_addr;
562 return true; 565 return true;
563 } 566 }
564 567
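The destination_count rule in the live (#else) branch of summarize() above reduces to a few lines. This is an illustrative restatement, not the HotSpot code: a region whose data compacts partly or wholly into itself does not count itself as a destination, so a count of zero means the region can be claimed and filled immediately.

#include <cstdio>
#include <cstddef>
// dest_region_1/2 hold the first and last word of the data copied from cur_region.
static unsigned destination_count(size_t cur_region,
                                  size_t dest_region_1,
                                  size_t dest_region_2) {
  unsigned count = (cur_region == dest_region_2) ? 1 : 2;
  if (dest_region_1 == dest_region_2) count -= 1;   // a single destination region
  return count;
}
int main() {
  printf("%u\n", destination_count(10,  7,  7));    // 1: one destination, not itself
  printf("%u\n", destination_count(10,  7,  8));    // 2: data straddles two regions
  printf("%u\n", destination_count(10,  9, 10));    // 1: the tail stays in region 10
  printf("%u\n", destination_count(10, 10, 10));    // 0: compacts entirely into itself
  return 0;
}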
565 bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) { 568 bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) {
566 HeapWord* block_addr = block_to_addr(block_index); 569 HeapWord* block_addr = block_to_addr(block_index);
567 HeapWord* block_end_addr = block_addr + BlockSize; 570 HeapWord* block_end_addr = block_addr + BlockSize;
568 size_t chunk_index = addr_to_chunk_idx(block_addr); 571 size_t region_index = addr_to_region_idx(block_addr);
569 HeapWord* partial_obj_end_addr = partial_obj_end(chunk_index); 572 HeapWord* partial_obj_end_addr = partial_obj_end(region_index);
570 573
571 // An object that ends at the end of the block, ends 574 // An object that ends at the end of the block, ends
572 // in the block (the last word of the object is to 575 // in the block (the last word of the object is to
573 // the left of the end). 576 // the left of the end).
574 if ((block_addr < partial_obj_end_addr) && 577 if ((block_addr < partial_obj_end_addr) &&
579 return false; 582 return false;
580 } 583 }
581 584
582 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) { 585 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
583 HeapWord* result = NULL; 586 HeapWord* result = NULL;
584 if (UseParallelOldGCChunkPointerCalc) { 587 if (UseParallelOldGCRegionPointerCalc) {
585 result = chunk_calc_new_pointer(addr); 588 result = region_calc_new_pointer(addr);
586 } else { 589 } else {
587 result = block_calc_new_pointer(addr); 590 result = block_calc_new_pointer(addr);
588 } 591 }
589 return result; 592 return result;
590 } 593 }
593 // for every reference. 596 // for every reference.
594 // Try to restructure this so that a NULL is returned if 597 // Try to restructure this so that a NULL is returned if
595 // the object is dead. But don't wast the cycles to explicitly check 598 // the object is dead. But don't wast the cycles to explicitly check
596 // that it is dead since only live objects should be passed in. 599 // that it is dead since only live objects should be passed in.
597 600
598 HeapWord* ParallelCompactData::chunk_calc_new_pointer(HeapWord* addr) { 601 HeapWord* ParallelCompactData::region_calc_new_pointer(HeapWord* addr) {
599 assert(addr != NULL, "Should detect NULL oop earlier"); 602 assert(addr != NULL, "Should detect NULL oop earlier");
600 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap"); 603 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
601 #ifdef ASSERT 604 #ifdef ASSERT
602 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) { 605 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
603 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr); 606 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
604 } 607 }
605 #endif 608 #endif
606 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked"); 609 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
607 610
608 // Chunk covering the object. 611 // Region covering the object.
609 size_t chunk_index = addr_to_chunk_idx(addr); 612 size_t region_index = addr_to_region_idx(addr);
610 const ChunkData* const chunk_ptr = chunk(chunk_index); 613 const RegionData* const region_ptr = region(region_index);
611 HeapWord* const chunk_addr = chunk_align_down(addr); 614 HeapWord* const region_addr = region_align_down(addr);
612 615
613 assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object"); 616 assert(addr < region_addr + RegionSize, "Region does not cover object");
614 assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check"); 617 assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
615 618
616 HeapWord* result = chunk_ptr->destination(); 619 HeapWord* result = region_ptr->destination();
617 620
618 // If all the data in the chunk is live, then the new location of the object 621 // If all the data in the region is live, then the new location of the object
619 // can be calculated from the destination of the chunk plus the offset of the 622 // can be calculated from the destination of the region plus the offset of the
620 // object in the chunk. 623 // object in the region.
621 if (chunk_ptr->data_size() == ChunkSize) { 624 if (region_ptr->data_size() == RegionSize) {
622 result += pointer_delta(addr, chunk_addr); 625 result += pointer_delta(addr, region_addr);
623 return result; 626 return result;
624 } 627 }
625 628
626 // The new location of the object is 629 // The new location of the object is
627 // chunk destination + 630 // region destination +
628 // size of the partial object extending onto the chunk + 631 // size of the partial object extending onto the region +
629 // sizes of the live objects in the Chunk that are to the left of addr 632 // sizes of the live objects in the Region that are to the left of addr
630 const size_t partial_obj_size = chunk_ptr->partial_obj_size(); 633 const size_t partial_obj_size = region_ptr->partial_obj_size();
631 HeapWord* const search_start = chunk_addr + partial_obj_size; 634 HeapWord* const search_start = region_addr + partial_obj_size;
632 635
633 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap(); 636 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
634 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr)); 637 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
635 638
636 result += partial_obj_size + live_to_left; 639 result += partial_obj_size + live_to_left;
646 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr); 649 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
647 } 650 }
648 #endif 651 #endif
649 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked"); 652 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
650 653
651 // Chunk covering the object. 654 // Region covering the object.
652 size_t chunk_index = addr_to_chunk_idx(addr); 655 size_t region_index = addr_to_region_idx(addr);
653 const ChunkData* const chunk_ptr = chunk(chunk_index); 656 const RegionData* const region_ptr = region(region_index);
654 HeapWord* const chunk_addr = chunk_align_down(addr); 657 HeapWord* const region_addr = region_align_down(addr);
655 658
656 assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object"); 659 assert(addr < region_addr + RegionSize, "Region does not cover object");
657 assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check"); 660 assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
658 661
659 HeapWord* result = chunk_ptr->destination(); 662 HeapWord* result = region_ptr->destination();
660 663
661 // If all the data in the chunk is live, then the new location of the object 664 // If all the data in the region is live, then the new location of the object
662 // can be calculated from the destination of the chunk plus the offset of the 665 // can be calculated from the destination of the region plus the offset of the
663 // object in the chunk. 666 // object in the region.
664 if (chunk_ptr->data_size() == ChunkSize) { 667 if (region_ptr->data_size() == RegionSize) {
665 result += pointer_delta(addr, chunk_addr); 668 result += pointer_delta(addr, region_addr);
666 return result; 669 return result;
667 } 670 }
668 671
669 // The new location of the object is 672 // The new location of the object is
670 // chunk destination + 673 // region destination +
671 // block offset + 674 // block offset +
672 // sizes of the live objects in the Block that are to the left of addr 675 // sizes of the live objects in the Block that are to the left of addr
673 const size_t block_offset = addr_to_block_ptr(addr)->offset(); 676 const size_t block_offset = addr_to_block_ptr(addr)->offset();
674 HeapWord* const search_start = chunk_addr + block_offset; 677 HeapWord* const search_start = region_addr + block_offset;
675 678
676 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap(); 679 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
677 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr)); 680 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
678 681
679 result += block_offset + live_to_left; 682 result += block_offset + live_to_left;
680 assert(result <= addr, "object cannot move to the right"); 683 assert(result <= addr, "object cannot move to the right");
681 assert(result == chunk_calc_new_pointer(addr), "Should match"); 684 assert(result == region_calc_new_pointer(addr), "Should match");
682 return result; 685 return result;
683 } 686 }
684 687
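A worked example (illustrative numbers) of the forwarding computation shared by the two calc_new_pointer variants above: the new location is the region's destination, plus the partial object (region variant) or recorded block offset (block variant) at the front, plus the live words the mark bitmap finds between that point and addr.

#include <cstdio>
#include <cstddef>
int main() {
  const size_t region_destination = 4096;  // where the region's data compacts to (word index)
  const size_t partial_obj_size   = 100;   // words of the object spilling in from the left
  const size_t live_to_left       = 40;    // live words between that object and addr
  printf("new location = word %zu\n",
         region_destination + partial_obj_size + live_to_left);      // 4236
  return 0;
}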
685 klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) { 688 klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
686 klassOop updated_klass; 689 klassOop updated_klass;
703 } 706 }
704 } 707 }
705 708
706 void ParallelCompactData::verify_clear() 709 void ParallelCompactData::verify_clear()
707 { 710 {
708 verify_clear(_chunk_vspace); 711 verify_clear(_region_vspace);
709 verify_clear(_block_vspace); 712 verify_clear(_block_vspace);
710 } 713 }
711 #endif // #ifdef ASSERT 714 #endif // #ifdef ASSERT
712 715
713 #ifdef NOT_PRODUCT 716 #ifdef NOT_PRODUCT
714 ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) { 717 ParallelCompactData::RegionData* debug_region(size_t region_index) {
715 ParallelCompactData& sd = PSParallelCompact::summary_data(); 718 ParallelCompactData& sd = PSParallelCompact::summary_data();
716 return sd.chunk(chunk_index); 719 return sd.region(region_index);
717 } 720 }
718 #endif 721 #endif
719 722
720 elapsedTimer PSParallelCompact::_accumulated_time; 723 elapsedTimer PSParallelCompact::_accumulated_time;
721 unsigned int PSParallelCompact::_total_invocations = 0; 724 unsigned int PSParallelCompact::_total_invocations = 0;
864 867
865 const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot); 868 const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
866 const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top)); 869 const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
867 _mark_bitmap.clear_range(beg_bit, end_bit); 870 _mark_bitmap.clear_range(beg_bit, end_bit);
868 871
869 const size_t beg_chunk = _summary_data.addr_to_chunk_idx(bot); 872 const size_t beg_region = _summary_data.addr_to_region_idx(bot);
870 const size_t end_chunk = 873 const size_t end_region =
871 _summary_data.addr_to_chunk_idx(_summary_data.chunk_align_up(max_top)); 874 _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
872 _summary_data.clear_range(beg_chunk, end_chunk); 875 _summary_data.clear_range(beg_region, end_region);
873 } 876 }
874 877
875 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values) 878 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
876 { 879 {
877 // Update the from & to space pointers in space_info, since they are swapped 880 // Update the from & to space pointers in space_info, since they are swapped
983 986
984 HeapWord* 987 HeapWord*
985 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id, 988 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
986 bool maximum_compaction) 989 bool maximum_compaction)
987 { 990 {
988 const size_t chunk_size = ParallelCompactData::ChunkSize; 991 const size_t region_size = ParallelCompactData::RegionSize;
989 const ParallelCompactData& sd = summary_data(); 992 const ParallelCompactData& sd = summary_data();
990 993
991 const MutableSpace* const space = _space_info[id].space(); 994 const MutableSpace* const space = _space_info[id].space();
992 HeapWord* const top_aligned_up = sd.chunk_align_up(space->top()); 995 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
993 const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(space->bottom()); 996 const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
994 const ChunkData* const end_cp = sd.addr_to_chunk_ptr(top_aligned_up); 997 const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
995 998
996 // Skip full chunks at the beginning of the space--they are necessarily part 999 // Skip full regions at the beginning of the space--they are necessarily part
997 // of the dense prefix. 1000 // of the dense prefix.
998 size_t full_count = 0; 1001 size_t full_count = 0;
999 const ChunkData* cp; 1002 const RegionData* cp;
1000 for (cp = beg_cp; cp < end_cp && cp->data_size() == chunk_size; ++cp) { 1003 for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
1001 ++full_count; 1004 ++full_count;
1002 } 1005 }
1003 1006
1004 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity"); 1007 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1005 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num; 1008 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1006 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval; 1009 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
1007 if (maximum_compaction || cp == end_cp || interval_ended) { 1010 if (maximum_compaction || cp == end_cp || interval_ended) {
1008 _maximum_compaction_gc_num = total_invocations(); 1011 _maximum_compaction_gc_num = total_invocations();
1009 return sd.chunk_to_addr(cp); 1012 return sd.region_to_addr(cp);
1010 } 1013 }
1011 1014
1012 HeapWord* const new_top = _space_info[id].new_top(); 1015 HeapWord* const new_top = _space_info[id].new_top();
1013 const size_t space_live = pointer_delta(new_top, space->bottom()); 1016 const size_t space_live = pointer_delta(new_top, space->bottom());
1014 const size_t space_used = space->used_in_words(); 1017 const size_t space_used = space->used_in_words();
1027 space_live, space_used, 1030 space_live, space_used,
1028 space_capacity); 1031 space_capacity);
1029 } 1032 }
1030 1033
1031 // XXX - Use binary search? 1034 // XXX - Use binary search?
1032 HeapWord* dense_prefix = sd.chunk_to_addr(cp); 1035 HeapWord* dense_prefix = sd.region_to_addr(cp);
1033 const ChunkData* full_cp = cp; 1036 const RegionData* full_cp = cp;
1034 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(space->top() - 1); 1037 const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
1035 while (cp < end_cp) { 1038 while (cp < end_cp) {
1036 HeapWord* chunk_destination = cp->destination(); 1039 HeapWord* region_destination = cp->destination();
1037 const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination); 1040 const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
1038 if (TraceParallelOldGCDensePrefix && Verbose) { 1041 if (TraceParallelOldGCDensePrefix && Verbose) {
1039 tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " " 1042 tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
1040 "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8), 1043 "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
1041 sd.chunk(cp), chunk_destination, 1044 sd.region(cp), region_destination,
1042 dense_prefix, cur_deadwood); 1045 dense_prefix, cur_deadwood);
1043 } 1046 }
1044 1047
1045 if (cur_deadwood >= deadwood_goal) { 1048 if (cur_deadwood >= deadwood_goal) {
1046 // Found the chunk that has the correct amount of deadwood to the left. 1049 // Found the region that has the correct amount of deadwood to the left.
1047 // This typically occurs after crossing a fairly sparse set of chunks, so 1050 // This typically occurs after crossing a fairly sparse set of regions, so
1048 // iterate backwards over those sparse chunks, looking for the chunk that 1051 // iterate backwards over those sparse regions, looking for the region
1049 // has the lowest density of live objects 'to the right.' 1052 // that has the lowest density of live objects 'to the right.'
1050 size_t space_to_left = sd.chunk(cp) * chunk_size; 1053 size_t space_to_left = sd.region(cp) * region_size;
1051 size_t live_to_left = space_to_left - cur_deadwood; 1054 size_t live_to_left = space_to_left - cur_deadwood;
1052 size_t space_to_right = space_capacity - space_to_left; 1055 size_t space_to_right = space_capacity - space_to_left;
1053 size_t live_to_right = space_live - live_to_left; 1056 size_t live_to_right = space_live - live_to_left;
1054 double density_to_right = double(live_to_right) / space_to_right; 1057 double density_to_right = double(live_to_right) / space_to_right;
1055 while (cp > full_cp) { 1058 while (cp > full_cp) {
1056 --cp; 1059 --cp;
1057 const size_t prev_chunk_live_to_right = live_to_right - cp->data_size(); 1060 const size_t prev_region_live_to_right = live_to_right -
1058 const size_t prev_chunk_space_to_right = space_to_right + chunk_size; 1061 cp->data_size();
1059 double prev_chunk_density_to_right = 1062 const size_t prev_region_space_to_right = space_to_right + region_size;
1060 double(prev_chunk_live_to_right) / prev_chunk_space_to_right; 1063 double prev_region_density_to_right =
1061 if (density_to_right <= prev_chunk_density_to_right) { 1064 double(prev_region_live_to_right) / prev_region_space_to_right;
1065 if (density_to_right <= prev_region_density_to_right) {
1062 return dense_prefix; 1066 return dense_prefix;
1063 } 1067 }
1064 if (TraceParallelOldGCDensePrefix && Verbose) { 1068 if (TraceParallelOldGCDensePrefix && Verbose) {
1065 tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f " 1069 tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
1066 "pc_d2r=%10.8f", sd.chunk(cp), density_to_right, 1070 "pc_d2r=%10.8f", sd.region(cp), density_to_right,
1067 prev_chunk_density_to_right); 1071 prev_region_density_to_right);
1068 } 1072 }
1069 dense_prefix -= chunk_size; 1073 dense_prefix -= region_size;
1070 live_to_right = prev_chunk_live_to_right; 1074 live_to_right = prev_region_live_to_right;
1071 space_to_right = prev_chunk_space_to_right; 1075 space_to_right = prev_region_space_to_right;
1072 density_to_right = prev_chunk_density_to_right; 1076 density_to_right = prev_region_density_to_right;
1073 } 1077 }
1074 return dense_prefix; 1078 return dense_prefix;
1075 } 1079 }
1076 1080
1077 dense_prefix += chunk_size; 1081 dense_prefix += region_size;
1078 ++cp; 1082 ++cp;
1079 } 1083 }
1080 1084
1081 return dense_prefix; 1085 return dense_prefix;
1082 } 1086 }
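A sketch (illustrative numbers) of the backup step in the loop above: once enough dead wood has accumulated to the left, the candidate dense prefix end keeps moving one region to the left as long as that lowers the density of live data to its right.

#include <cstdio>
int main() {
  const double region_size    = 512;
  const double live_to_right  = 3000;   // live words to the right of the candidate
  const double space_to_right = 8192;   // space to the right of the candidate
  const double prev_data_size = 50;     // live words in the region just to the left
  const double density      = live_to_right / space_to_right;                  // ~0.366
  const double prev_density = (live_to_right - prev_data_size)
                              / (space_to_right + region_size);                // ~0.339
  printf("back up one region? %s\n", prev_density < density ? "yes" : "no");   // yes
  return 0;
}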
1085 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm, 1089 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
1086 const SpaceId id, 1090 const SpaceId id,
1087 const bool maximum_compaction, 1091 const bool maximum_compaction,
1088 HeapWord* const addr) 1092 HeapWord* const addr)
1089 { 1093 {
1090 const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr); 1094 const size_t region_idx = summary_data().addr_to_region_idx(addr);
1091 ChunkData* const cp = summary_data().chunk(chunk_idx); 1095 RegionData* const cp = summary_data().region(region_idx);
1092 const MutableSpace* const space = _space_info[id].space(); 1096 const MutableSpace* const space = _space_info[id].space();
1093 HeapWord* const new_top = _space_info[id].new_top(); 1097 HeapWord* const new_top = _space_info[id].new_top();
1094 1098
1095 const size_t space_live = pointer_delta(new_top, space->bottom()); 1099 const size_t space_live = pointer_delta(new_top, space->bottom());
1096 const size_t dead_to_left = pointer_delta(addr, cp->destination()); 1100 const size_t dead_to_left = pointer_delta(addr, cp->destination());
1102 tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " " 1106 tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
1103 "spl=" SIZE_FORMAT " " 1107 "spl=" SIZE_FORMAT " "
1104 "d2l=" SIZE_FORMAT " d2l%%=%6.4f " 1108 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
1105 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT 1109 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
1106 " ratio=%10.8f", 1110 " ratio=%10.8f",
1107 algorithm, addr, chunk_idx, 1111 algorithm, addr, region_idx,
1108 space_live, 1112 space_live,
1109 dead_to_left, dead_to_left_pct, 1113 dead_to_left, dead_to_left_pct,
1110 dead_to_right, live_to_right, 1114 dead_to_right, live_to_right,
1111 double(dead_to_right) / live_to_right); 1115 double(dead_to_right) / live_to_right);
1112 } 1116 }
1164 const double min = double(min_percent) / 100.0; 1168 const double min = double(min_percent) / 100.0;
1165 const double limit = raw_limit - _dwl_adjustment + min; 1169 const double limit = raw_limit - _dwl_adjustment + min;
1166 return MAX2(limit, 0.0); 1170 return MAX2(limit, 0.0);
1167 } 1171 }
1168 1172
1169 ParallelCompactData::ChunkData* 1173 ParallelCompactData::RegionData*
1170 PSParallelCompact::first_dead_space_chunk(const ChunkData* beg, 1174 PSParallelCompact::first_dead_space_region(const RegionData* beg,
1171 const ChunkData* end) 1175 const RegionData* end)
1172 { 1176 {
1173 const size_t chunk_size = ParallelCompactData::ChunkSize; 1177 const size_t region_size = ParallelCompactData::RegionSize;
1174 ParallelCompactData& sd = summary_data(); 1178 ParallelCompactData& sd = summary_data();
1175 size_t left = sd.chunk(beg); 1179 size_t left = sd.region(beg);
1176 size_t right = end > beg ? sd.chunk(end) - 1 : left; 1180 size_t right = end > beg ? sd.region(end) - 1 : left;
1177 1181
1178 // Binary search. 1182 // Binary search.
1179 while (left < right) { 1183 while (left < right) {
1180 // Equivalent to (left + right) / 2, but does not overflow. 1184 // Equivalent to (left + right) / 2, but does not overflow.
1181 const size_t middle = left + (right - left) / 2; 1185 const size_t middle = left + (right - left) / 2;
1182 ChunkData* const middle_ptr = sd.chunk(middle); 1186 RegionData* const middle_ptr = sd.region(middle);
1183 HeapWord* const dest = middle_ptr->destination(); 1187 HeapWord* const dest = middle_ptr->destination();
1184 HeapWord* const addr = sd.chunk_to_addr(middle); 1188 HeapWord* const addr = sd.region_to_addr(middle);
1185 assert(dest != NULL, "sanity"); 1189 assert(dest != NULL, "sanity");
1186 assert(dest <= addr, "must move left"); 1190 assert(dest <= addr, "must move left");
1187 1191
1188 if (middle > left && dest < addr) { 1192 if (middle > left && dest < addr) {
1189 right = middle - 1; 1193 right = middle - 1;
1190 } else if (middle < right && middle_ptr->data_size() == chunk_size) { 1194 } else if (middle < right && middle_ptr->data_size() == region_size) {
1191 left = middle + 1; 1195 left = middle + 1;
1192 } else { 1196 } else {
1193 return middle_ptr; 1197 return middle_ptr;
1194 } 1198 }
1195 } 1199 }
1196 return sd.chunk(left); 1200 return sd.region(left);
1197 } 1201 }
1198 1202
1199 ParallelCompactData::ChunkData* 1203 ParallelCompactData::RegionData*
1200 PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg, 1204 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
1201 const ChunkData* end, 1205 const RegionData* end,
1202 size_t dead_words) 1206 size_t dead_words)
1203 { 1207 {
1204 ParallelCompactData& sd = summary_data(); 1208 ParallelCompactData& sd = summary_data();
1205 size_t left = sd.chunk(beg); 1209 size_t left = sd.region(beg);
1206 size_t right = end > beg ? sd.chunk(end) - 1 : left; 1210 size_t right = end > beg ? sd.region(end) - 1 : left;
1207 1211
1208 // Binary search. 1212 // Binary search.
1209 while (left < right) { 1213 while (left < right) {
1210 // Equivalent to (left + right) / 2, but does not overflow. 1214 // Equivalent to (left + right) / 2, but does not overflow.
1211 const size_t middle = left + (right - left) / 2; 1215 const size_t middle = left + (right - left) / 2;
1212 ChunkData* const middle_ptr = sd.chunk(middle); 1216 RegionData* const middle_ptr = sd.region(middle);
1213 HeapWord* const dest = middle_ptr->destination(); 1217 HeapWord* const dest = middle_ptr->destination();
1214 HeapWord* const addr = sd.chunk_to_addr(middle); 1218 HeapWord* const addr = sd.region_to_addr(middle);
1215 assert(dest != NULL, "sanity"); 1219 assert(dest != NULL, "sanity");
1216 assert(dest <= addr, "must move left"); 1220 assert(dest <= addr, "must move left");
1217 1221
1218 const size_t dead_to_left = pointer_delta(addr, dest); 1222 const size_t dead_to_left = pointer_delta(addr, dest);
1219 if (middle > left && dead_to_left > dead_words) { 1223 if (middle > left && dead_to_left > dead_words) {
1222 left = middle + 1; 1226 left = middle + 1;
1223 } else { 1227 } else {
1224 return middle_ptr; 1228 return middle_ptr;
1225 } 1229 }
1226 } 1230 }
1227 return sd.chunk(left); 1231 return sd.region(left);
1228 } 1232 }
1229 1233
1230 // The result is valid during the summary phase, after the initial summarization 1234 // The result is valid during the summary phase, after the initial summarization
1231 // of each space into itself, and before final summarization. 1235 // of each space into itself, and before final summarization.
1232 inline double 1236 inline double
1233 PSParallelCompact::reclaimed_ratio(const ChunkData* const cp, 1237 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
1234 HeapWord* const bottom, 1238 HeapWord* const bottom,
1235 HeapWord* const top, 1239 HeapWord* const top,
1236 HeapWord* const new_top) 1240 HeapWord* const new_top)
1237 { 1241 {
1238 ParallelCompactData& sd = summary_data(); 1242 ParallelCompactData& sd = summary_data();
1242 assert(top != NULL, "sanity"); 1246 assert(top != NULL, "sanity");
1243 assert(new_top != NULL, "sanity"); 1247 assert(new_top != NULL, "sanity");
1244 assert(top >= new_top, "summary data problem?"); 1248 assert(top >= new_top, "summary data problem?");
1245 assert(new_top > bottom, "space is empty; should not be here"); 1249 assert(new_top > bottom, "space is empty; should not be here");
1246 assert(new_top >= cp->destination(), "sanity"); 1250 assert(new_top >= cp->destination(), "sanity");
1247 assert(top >= sd.chunk_to_addr(cp), "sanity"); 1251 assert(top >= sd.region_to_addr(cp), "sanity");
1248 1252
1249 HeapWord* const destination = cp->destination(); 1253 HeapWord* const destination = cp->destination();
1250 const size_t dense_prefix_live = pointer_delta(destination, bottom); 1254 const size_t dense_prefix_live = pointer_delta(destination, bottom);
1251 const size_t compacted_region_live = pointer_delta(new_top, destination); 1255 const size_t compacted_region_live = pointer_delta(new_top, destination);
1252 const size_t compacted_region_used = pointer_delta(top, sd.chunk_to_addr(cp)); 1256 const size_t compacted_region_used = pointer_delta(top,
1257 sd.region_to_addr(cp));
1253 const size_t reclaimable = compacted_region_used - compacted_region_live; 1258 const size_t reclaimable = compacted_region_used - compacted_region_live;
1254 1259
1255 const double divisor = dense_prefix_live + 1.25 * compacted_region_live; 1260 const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1256 return double(reclaimable) / divisor; 1261 return double(reclaimable) / divisor;
1257 } 1262 }
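Editor's note: the ratio above rewards dead space that would be reclaimed and penalizes the live words that would have to be copied, with the compacted region's live data weighted by 1.25 to reflect copying cost. A sketch of the same formula over plain word counts (hypothetical helper, not the HotSpot function):

    // Sketch: reclaimed_ratio expressed over plain word counts.
    #include <cstddef>

    static double reclaimed_ratio_words(size_t dense_prefix_live,
                                        size_t compacted_live,
                                        size_t compacted_used) {
      const size_t reclaimable = compacted_used - compacted_live;
      // Live words in the compacted region must be copied, so they are
      // weighted more heavily (1.25) than live words left in the prefix.
      const double divisor = double(dense_prefix_live) +
                             1.25 * double(compacted_live);
      return double(reclaimable) / divisor;
    }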
1258 1263
1259 // Return the address of the end of the dense prefix, a.k.a. the start of the 1264 // Return the address of the end of the dense prefix, a.k.a. the start of the
1260 // compacted region. The address is always on a chunk boundary. 1265 // compacted region. The address is always on a region boundary.
1261 // 1266 //
1262 // Completely full chunks at the left are skipped, since no compaction can occur 1267 // Completely full regions at the left are skipped, since no compaction can
1263 // in those chunks. Then the maximum amount of dead wood to allow is computed, 1268 // occur in those regions. Then the maximum amount of dead wood to allow is
1264 // based on the density (amount live / capacity) of the generation; the chunk 1269 // computed, based on the density (amount live / capacity) of the generation;
1265 // with approximately that amount of dead space to the left is identified as the 1270 // the region with approximately that amount of dead space to the left is
1266 // limit chunk. Chunks between the last completely full chunk and the limit 1271 // identified as the limit region. Regions between the last completely full
1267 // chunk are scanned and the one that has the best (maximum) reclaimed_ratio() 1272 // region and the limit region are scanned and the one that has the best
1268 // is selected. 1273 // (maximum) reclaimed_ratio() is selected.
1269 HeapWord* 1274 HeapWord*
1270 PSParallelCompact::compute_dense_prefix(const SpaceId id, 1275 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1271 bool maximum_compaction) 1276 bool maximum_compaction)
1272 { 1277 {
1273 const size_t chunk_size = ParallelCompactData::ChunkSize; 1278 const size_t region_size = ParallelCompactData::RegionSize;
1274 const ParallelCompactData& sd = summary_data(); 1279 const ParallelCompactData& sd = summary_data();
1275 1280
1276 const MutableSpace* const space = _space_info[id].space(); 1281 const MutableSpace* const space = _space_info[id].space();
1277 HeapWord* const top = space->top(); 1282 HeapWord* const top = space->top();
1278 HeapWord* const top_aligned_up = sd.chunk_align_up(top); 1283 HeapWord* const top_aligned_up = sd.region_align_up(top);
1279 HeapWord* const new_top = _space_info[id].new_top(); 1284 HeapWord* const new_top = _space_info[id].new_top();
1280 HeapWord* const new_top_aligned_up = sd.chunk_align_up(new_top); 1285 HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
1281 HeapWord* const bottom = space->bottom(); 1286 HeapWord* const bottom = space->bottom();
1282 const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(bottom); 1287 const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
1283 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up); 1288 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
1284 const ChunkData* const new_top_cp = sd.addr_to_chunk_ptr(new_top_aligned_up); 1289 const RegionData* const new_top_cp =
1285 1290 sd.addr_to_region_ptr(new_top_aligned_up);
1286 // Skip full chunks at the beginning of the space--they are necessarily part 1291
1292 // Skip full regions at the beginning of the space--they are necessarily part
1287 // of the dense prefix. 1293 // of the dense prefix.
1288 const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp); 1294 const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
1289 assert(full_cp->destination() == sd.chunk_to_addr(full_cp) || 1295 assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
1290 space->is_empty(), "no dead space allowed to the left"); 1296 space->is_empty(), "no dead space allowed to the left");
1291 assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1, 1297 assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
1292 "chunk must have dead space"); 1298 "region must have dead space");
1293 1299
1294 // The gc number is saved whenever a maximum compaction is done, and used to 1300 // The gc number is saved whenever a maximum compaction is done, and used to
1295 // determine when the maximum compaction interval has expired. This avoids 1301 // determine when the maximum compaction interval has expired. This avoids
1296 // successive max compactions for different reasons. 1302 // successive max compactions for different reasons.
1297 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity"); 1303 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1298 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num; 1304 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1299 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval || 1305 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1300 total_invocations() == HeapFirstMaximumCompactionCount; 1306 total_invocations() == HeapFirstMaximumCompactionCount;
1301 if (maximum_compaction || full_cp == top_cp || interval_ended) { 1307 if (maximum_compaction || full_cp == top_cp || interval_ended) {
1302 _maximum_compaction_gc_num = total_invocations(); 1308 _maximum_compaction_gc_num = total_invocations();
1303 return sd.chunk_to_addr(full_cp); 1309 return sd.region_to_addr(full_cp);
1304 } 1310 }
1305 1311
1306 const size_t space_live = pointer_delta(new_top, bottom); 1312 const size_t space_live = pointer_delta(new_top, bottom);
1307 const size_t space_used = space->used_in_words(); 1313 const size_t space_used = space->used_in_words();
1308 const size_t space_capacity = space->capacity_in_words(); 1314 const size_t space_capacity = space->capacity_in_words();
1324 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT, 1330 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1325 density, min_percent_free, limiter, 1331 density, min_percent_free, limiter,
1326 dead_wood_max, dead_wood_limit); 1332 dead_wood_max, dead_wood_limit);
1327 } 1333 }
1328 1334
1329 // Locate the chunk with the desired amount of dead space to the left. 1335 // Locate the region with the desired amount of dead space to the left.
1330 const ChunkData* const limit_cp = 1336 const RegionData* const limit_cp =
1331 dead_wood_limit_chunk(full_cp, top_cp, dead_wood_limit); 1337 dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
1332 1338
1333 // Scan from the first chunk with dead space to the limit chunk and find the 1339 // Scan from the first region with dead space to the limit region and find the
1334 // one with the best (largest) reclaimed ratio. 1340 // one with the best (largest) reclaimed ratio.
1335 double best_ratio = 0.0; 1341 double best_ratio = 0.0;
1336 const ChunkData* best_cp = full_cp; 1342 const RegionData* best_cp = full_cp;
1337 for (const ChunkData* cp = full_cp; cp < limit_cp; ++cp) { 1343 for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
1338 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top); 1344 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1339 if (tmp_ratio > best_ratio) { 1345 if (tmp_ratio > best_ratio) {
1340 best_cp = cp; 1346 best_cp = cp;
1341 best_ratio = tmp_ratio; 1347 best_ratio = tmp_ratio;
1342 } 1348 }
1343 } 1349 }
1344 1350
1345 #if 0 1351 #if 0
1346 // Something to consider: if the chunk with the best ratio is 'close to' the 1352 // Something to consider: if the region with the best ratio is 'close to' the
1347 // first chunk w/free space, choose the first chunk with free space 1353 // first region w/free space, choose the first region with free space
1348 // ("first-free"). The first-free chunk is usually near the start of the 1354 // ("first-free"). The first-free region is usually near the start of the
1349 // heap, which means we are copying most of the heap already, so copy a bit 1355 // heap, which means we are copying most of the heap already, so copy a bit
1350 // more to get complete compaction. 1356 // more to get complete compaction.
1351 if (pointer_delta(best_cp, full_cp, sizeof(ChunkData)) < 4) { 1357 if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
1352 _maximum_compaction_gc_num = total_invocations(); 1358 _maximum_compaction_gc_num = total_invocations();
1353 best_cp = full_cp; 1359 best_cp = full_cp;
1354 } 1360 }
1355 #endif // #if 0 1361 #endif // #if 0
1356 1362
1357 return sd.chunk_to_addr(best_cp); 1363 return sd.region_to_addr(best_cp);
1358 } 1364 }
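Editor's note: the selection loop in compute_dense_prefix is an argmax over the candidate regions between the last completely full region and the dead-wood limit. A standalone sketch of that scan, assuming the per-candidate ratios have already been computed (hypothetical names):

    // Sketch: choose the candidate with the largest ratio; the strict '>'
    // keeps the leftmost candidate on ties, as in the loop above.
    #include <cstddef>
    #include <vector>

    static size_t best_candidate(const std::vector<double>& ratios) {
      size_t best = 0;
      double best_ratio = 0.0;
      for (size_t i = 0; i < ratios.size(); ++i) {
        if (ratios[i] > best_ratio) {
          best = i;
          best_ratio = ratios[i];
        }
      }
      return best;
    }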
1359 1365
1360 void PSParallelCompact::summarize_spaces_quick() 1366 void PSParallelCompact::summarize_spaces_quick()
1361 { 1367 {
1362 for (unsigned int i = 0; i < last_space_id; ++i) { 1368 for (unsigned int i = 0; i < last_space_id; ++i) {
1370 } 1376 }
1371 1377
1372 void PSParallelCompact::fill_dense_prefix_end(SpaceId id) 1378 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1373 { 1379 {
1374 HeapWord* const dense_prefix_end = dense_prefix(id); 1380 HeapWord* const dense_prefix_end = dense_prefix(id);
1375 const ChunkData* chunk = _summary_data.addr_to_chunk_ptr(dense_prefix_end); 1381 const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
1376 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end); 1382 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1377 if (dead_space_crosses_boundary(chunk, dense_prefix_bit)) { 1383 if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
1378 // Only enough dead space is filled so that any remaining dead space to the 1384 // Only enough dead space is filled so that any remaining dead space to the
1379 // left is larger than the minimum filler object. (The remainder is filled 1385 // left is larger than the minimum filler object. (The remainder is filled
1380 // during the copy/update phase.) 1386 // during the copy/update phase.)
1381 // 1387 //
1382 // The size of the dead space to the right of the boundary is not a 1388 // The size of the dead space to the right of the boundary is not a
1463 // a fragment of dead space that is too small to fill with an object. 1469 // a fragment of dead space that is too small to fill with an object.
1464 if (!maximum_compaction && dense_prefix_end != space->bottom()) { 1470 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1465 fill_dense_prefix_end(id); 1471 fill_dense_prefix_end(id);
1466 } 1472 }
1467 1473
1468 // Compute the destination of each Chunk, and thus each object. 1474 // Compute the destination of each Region, and thus each object.
1469 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end); 1475 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1470 _summary_data.summarize(dense_prefix_end, space->end(), 1476 _summary_data.summarize(dense_prefix_end, space->end(),
1471 dense_prefix_end, space->top(), 1477 dense_prefix_end, space->top(),
1472 _space_info[id].new_top_addr()); 1478 _space_info[id].new_top_addr());
1473 } 1479 }
1474 1480
1475 if (TraceParallelOldGCSummaryPhase) { 1481 if (TraceParallelOldGCSummaryPhase) {
1476 const size_t chunk_size = ParallelCompactData::ChunkSize; 1482 const size_t region_size = ParallelCompactData::RegionSize;
1477 HeapWord* const dense_prefix_end = _space_info[id].dense_prefix(); 1483 HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1478 const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end); 1484 const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1479 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom()); 1485 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1480 HeapWord* const new_top = _space_info[id].new_top(); 1486 HeapWord* const new_top = _space_info[id].new_top();
1481 const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(new_top); 1487 const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1482 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end); 1488 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1483 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " " 1489 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1484 "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " " 1490 "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1485 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT, 1491 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1486 id, space->capacity_in_words(), dense_prefix_end, 1492 id, space->capacity_in_words(), dense_prefix_end,
1487 dp_chunk, dp_words / chunk_size, 1493 dp_region, dp_words / region_size,
1488 cr_words / chunk_size, new_top); 1494 cr_words / region_size, new_top);
1489 } 1495 }
1490 } 1496 }
1491 1497
1492 void PSParallelCompact::summary_phase(ParCompactionManager* cm, 1498 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1493 bool maximum_compaction) 1499 bool maximum_compaction)
1511 summarize_spaces_quick(); 1517 summarize_spaces_quick();
1512 1518
1513 if (TraceParallelOldGCSummaryPhase) { 1519 if (TraceParallelOldGCSummaryPhase) {
1514 tty->print_cr("summary_phase: after summarizing each space to self"); 1520 tty->print_cr("summary_phase: after summarizing each space to self");
1515 Universe::print(); 1521 Universe::print();
1516 NOT_PRODUCT(print_chunk_ranges()); 1522 NOT_PRODUCT(print_region_ranges());
1517 if (Verbose) { 1523 if (Verbose) {
1518 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info)); 1524 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1519 } 1525 }
1520 } 1526 }
1521 1527
1557 } 1563 }
1558 _summary_data.summarize(*new_top_addr, target_space_end, 1564 _summary_data.summarize(*new_top_addr, target_space_end,
1559 space->bottom(), space->top(), 1565 space->bottom(), space->top(),
1560 new_top_addr); 1566 new_top_addr);
1561 1567
1562 // Clear the source_chunk field for each chunk in the space. 1568 // Clear the source_region field for each region in the space.
1563 HeapWord* const new_top = _space_info[id].new_top(); 1569 HeapWord* const new_top = _space_info[id].new_top();
1564 HeapWord* const clear_end = _summary_data.chunk_align_up(new_top); 1570 HeapWord* const clear_end = _summary_data.region_align_up(new_top);
1565 ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom()); 1571 RegionData* beg_region =
1566 ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(clear_end); 1572 _summary_data.addr_to_region_ptr(space->bottom());
1567 while (beg_chunk < end_chunk) { 1573 RegionData* end_region = _summary_data.addr_to_region_ptr(clear_end);
1568 beg_chunk->set_source_chunk(0); 1574 while (beg_region < end_region) {
1569 ++beg_chunk; 1575 beg_region->set_source_region(0);
1576 ++beg_region;
1570 } 1577 }
1571 1578
1572 // Reset the new_top value for the space. 1579 // Reset the new_top value for the space.
1573 _space_info[id].set_new_top(space->bottom()); 1580 _space_info[id].set_new_top(space->bottom());
1574 } 1581 }
1575 } 1582 }
1576 1583
1577 // Fill in the block data after any changes to the chunks have 1584 // Fill in the block data after any changes to the regions have
1578 // been made. 1585 // been made.
1579 #ifdef ASSERT 1586 #ifdef ASSERT
1580 summarize_blocks(cm, perm_space_id); 1587 summarize_blocks(cm, perm_space_id);
1581 summarize_blocks(cm, old_space_id); 1588 summarize_blocks(cm, old_space_id);
1582 #else 1589 #else
1583 if (!UseParallelOldGCChunkPointerCalc) { 1590 if (!UseParallelOldGCRegionPointerCalc) {
1584 summarize_blocks(cm, perm_space_id); 1591 summarize_blocks(cm, perm_space_id);
1585 summarize_blocks(cm, old_space_id); 1592 summarize_blocks(cm, old_space_id);
1586 } 1593 }
1587 #endif 1594 #endif
1588 1595
1589 if (TraceParallelOldGCSummaryPhase) { 1596 if (TraceParallelOldGCSummaryPhase) {
1590 tty->print_cr("summary_phase: after final summarization"); 1597 tty->print_cr("summary_phase: after final summarization");
1591 Universe::print(); 1598 Universe::print();
1592 NOT_PRODUCT(print_chunk_ranges()); 1599 NOT_PRODUCT(print_region_ranges());
1593 if (Verbose) { 1600 if (Verbose) {
1594 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info)); 1601 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
1595 } 1602 }
1596 } 1603 }
1597 } 1604 }
1598 1605
1599 // Fill in the BlockData. 1606 // Fill in the BlockData.
1600 // Iterate over the spaces and within each space iterate over 1607 // Iterate over the spaces and within each space iterate over
1601 // the chunks and fill in the BlockData for each chunk. 1608 // the regions and fill in the BlockData for each region.
1602 1609
1603 void PSParallelCompact::summarize_blocks(ParCompactionManager* cm, 1610 void PSParallelCompact::summarize_blocks(ParCompactionManager* cm,
1604 SpaceId first_compaction_space_id) { 1611 SpaceId first_compaction_space_id) {
1605 #if 0 1612 #if 0
1606 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);) 1613 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);)
1607 for (SpaceId cur_space_id = first_compaction_space_id; 1614 for (SpaceId cur_space_id = first_compaction_space_id;
1608 cur_space_id != last_space_id; 1615 cur_space_id != last_space_id;
1609 cur_space_id = next_compaction_space_id(cur_space_id)) { 1616 cur_space_id = next_compaction_space_id(cur_space_id)) {
1610 // Iterate over the chunks in the space 1617 // Iterate over the regions in the space
1611 size_t start_chunk_index = 1618 size_t start_region_index =
1612 _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom()); 1619 _summary_data.addr_to_region_idx(space(cur_space_id)->bottom());
1613 BitBlockUpdateClosure bbu(mark_bitmap(), 1620 BitBlockUpdateClosure bbu(mark_bitmap(),
1614 cm, 1621 cm,
1615 start_chunk_index); 1622 start_region_index);
1616 // Iterate over blocks. 1623 // Iterate over blocks.
1617 for (size_t chunk_index = start_chunk_index; 1624 for (size_t region_index = start_region_index;
1618 chunk_index < _summary_data.chunk_count() && 1625 region_index < _summary_data.region_count() &&
1619 _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top(); 1626 _summary_data.region_to_addr(region_index) <
1620 chunk_index++) { 1627 space(cur_space_id)->top();
1621 1628 region_index++) {
1622 // Reset the closure for the new chunk. Note that the closure 1629
1623 // maintains some data that does not get reset for each chunk 1630 // Reset the closure for the new region. Note that the closure
1631 // maintains some data that does not get reset for each region
1624 // so a new instance of the closure is not appropriate. 1632 // so a new instance of the closure is not appropriate.
1625 bbu.reset_chunk(chunk_index); 1633 bbu.reset_region(region_index);
1626 1634
1627 // Start the iteration with the first live object. This 1635 // Start the iteration with the first live object. This
1628 // may return the end of the chunk. That is acceptable since 1636 // may return the end of the region. That is acceptable since
1629 // it will properly limit the iterations. 1637 // it will properly limit the iterations.
1630 ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit( 1638 ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit(
1631 _summary_data.first_live_or_end_in_chunk(chunk_index)); 1639 _summary_data.first_live_or_end_in_region(region_index));
1632 1640
1633 // End the iteration at the end of the chunk. 1641 // End the iteration at the end of the region.
1634 HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index); 1642 HeapWord* region_addr = _summary_data.region_to_addr(region_index);
1635 HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize; 1643 HeapWord* region_end = region_addr + ParallelCompactData::RegionSize;
1636 ParMarkBitMap::idx_t right_offset = 1644 ParMarkBitMap::idx_t right_offset =
1637 mark_bitmap()->addr_to_bit(chunk_end); 1645 mark_bitmap()->addr_to_bit(region_end);
1638 1646
1639 // Blocks that have no objects starting in them can be 1647 // Blocks that have no objects starting in them can be
1640 // skipped because their data will never be used. 1648 // skipped because their data will never be used.
1641 if (left_offset < right_offset) { 1649 if (left_offset < right_offset) {
1642 1650
1643 // Iterate through the objects in the chunk. 1651 // Iterate through the objects in the region.
1644 ParMarkBitMap::idx_t last_offset = 1652 ParMarkBitMap::idx_t last_offset =
1645 mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset); 1653 mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset);
1646 1654
1647 // If last_offset is less than right_offset, then the iterations 1655 // If last_offset is less than right_offset, then the iterations
1648 // terminated while it was looking for an end bit. "last_offset" 1656 // terminated while it was looking for an end bit. "last_offset"
1649 // is then the offset for the last start bit. In this situation 1657 // is then the offset for the last start bit. In this situation
1650 // the "offset" field for the next block to the right (_cur_block + 1) 1658 // the "offset" field for the next block to the right (_cur_block + 1)
1651 // will not have been updated although there may be live data 1659 // will not have been updated although there may be live data
1652 // to the left of the chunk. 1660 // to the left of the region.
1653 1661
1654 size_t cur_block_plus_1 = bbu.cur_block() + 1; 1662 size_t cur_block_plus_1 = bbu.cur_block() + 1;
1655 HeapWord* cur_block_plus_1_addr = 1663 HeapWord* cur_block_plus_1_addr =
1656 _summary_data.block_to_addr(bbu.cur_block()) + 1664 _summary_data.block_to_addr(bbu.cur_block()) +
1657 ParallelCompactData::BlockSize; 1665 ParallelCompactData::BlockSize;
1667 MAX2(bbu.cur_block() + 1, 1675 MAX2(bbu.cur_block() + 1,
1668 _summary_data.addr_to_block_idx(last_offset_addr)); 1676 _summary_data.addr_to_block_idx(last_offset_addr));
1669 #else 1677 #else
1670 // The current block has already been updated. The only block 1678 // The current block has already been updated. The only block
1671 // that remains to be updated is the block where the last 1679 // that remains to be updated is the block where the last
1672 // object in the chunk starts. 1680 // object in the region starts.
1673 size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr); 1681 size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr);
1674 #endif 1682 #endif
1675 assert_bit_is_start(last_offset); 1683 assert_bit_is_start(last_offset);
1676 assert((last_block == _summary_data.block_count()) || 1684 assert((last_block == _summary_data.block_count()) ||
1677 (_summary_data.block(last_block)->raw_offset() == 0), 1685 (_summary_data.block(last_block)->raw_offset() == 0),
1678 "Should not have been set"); 1686 "Should not have been set");
1679 // Is the last block still in the current chunk? If still 1687 // Is the last block still in the current region? If still
1680 // in this chunk, update the last block (the counting that 1688 // in this region, update the last block (the counting that
1681 // included the current block is meant for the offset of the last 1689 // included the current block is meant for the offset of the last
1682 // block). If not in this chunk, do nothing. Should not 1690 // block). If not in this region, do nothing. Should not
1683 // update a block in the next chunk. 1691 // update a block in the next region.
1684 if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(), 1692 if (ParallelCompactData::region_contains_block(bbu.region_index(),
1685 last_block)) { 1693 last_block)) {
1686 if (last_offset < right_offset) { 1694 if (last_offset < right_offset) {
1687 // The last object started in this chunk but ends beyond 1695 // The last object started in this region but ends beyond
1688 // this chunk. Update the block for this last object. 1696 // this region. Update the block for this last object.
1689 assert(mark_bitmap()->is_marked(last_offset), "Should be marked"); 1697 assert(mark_bitmap()->is_marked(last_offset), "Should be marked");
1690 // No end bit was found. The closure takes care of 1698 // No end bit was found. The closure takes care of
1691 // the cases where 1699 // the cases where
1692 // an object crosses over into the next block 1700 // an object crosses over into the next block
1693 // an object starts and ends in the next block 1701 // an object starts and ends in the next block
1694 // It does not handle the case where an object is 1702 // It does not handle the case where an object is
1695 // the first object in a later block and extends 1703 // the first object in a later block and extends
1696 // past the end of the chunk (i.e., the closure 1704 // past the end of the region (i.e., the closure
1697 // only handles complete objects that are in the range 1705 // only handles complete objects that are in the range
1698 // it is given). That object is handed back here 1706 // it is given). That object is handed back here
1699 // for any special consideration necessary. 1707 // for any special consideration necessary.
1700 // 1708 //
1701 // Is the first bit in the last block a start or end bit? 1709 // Is the first bit in the last block a start or end bit?
1707 // block? A block AA will already have been updated if an 1715 // block? A block AA will already have been updated if an
1708 // object ends in the next block AA+1. An object found to end in 1716 // object ends in the next block AA+1. An object found to end in
1709 // block AA+1 is the trigger that updates AA. Objects are being 1717 // block AA+1 is the trigger that updates AA. Objects are being
1710 // counted in the current block for updating a following 1718 // counted in the current block for updating a following
1711 // block. An object may start in a later 1719 // block. An object may start in a later
1712 // block but may extend beyond the last block in the chunk. 1720 // block but may extend beyond the last block in the region.
1713 // Updates are only done when the end of an object has been 1721 // Updates are only done when the end of an object has been
1714 // found. If the last object (covered by block L) starts 1722 // found. If the last object (covered by block L) starts
1715 // beyond the current block, then no object ends in L (otherwise 1723 // beyond the current block, then no object ends in L (otherwise
1716 // L would be the current block). So the first bit in L is 1724 // L would be the current block). So the first bit in L is
1717 // a start bit. 1725 // a start bit.
1718 // 1726 //
1719 // Else the last object starts in the current block and ends 1727 // Else the last object starts in the current block and ends
1720 // beyond the chunk. The current block has already been 1728 // beyond the region. The current block has already been
1721 // updated and there is no later block (with an object 1729 // updated and there is no later block (with an object
1722 // starting in it) that needs to be updated. 1730 // starting in it) that needs to be updated.
1723 // 1731 //
1724 if (_summary_data.partial_obj_ends_in_block(last_block)) { 1732 if (_summary_data.partial_obj_ends_in_block(last_block)) {
1725 _summary_data.block(last_block)->set_end_bit_offset( 1733 _summary_data.block(last_block)->set_end_bit_offset(
1726 bbu.live_data_left()); 1734 bbu.live_data_left());
1727 } else if (last_offset_addr >= cur_block_plus_1_addr) { 1735 } else if (last_offset_addr >= cur_block_plus_1_addr) {
1728 // The start of the object is on a later block 1736 // The start of the object is on a later block
1729 // (to the right of the current block) and there are no 1737 // (to the right of the current block) and there are no
1730 // complete live objects to the left of this last object 1738 // complete live objects to the left of this last object
1731 // within the chunk. 1739 // within the region.
1732 // The first bit in the block is for the start of the 1740 // The first bit in the block is for the start of the
1733 // last object. 1741 // last object.
1734 _summary_data.block(last_block)->set_start_bit_offset( 1742 _summary_data.block(last_block)->set_start_bit_offset(
1735 bbu.live_data_left()); 1743 bbu.live_data_left());
1736 } else { 1744 } else {
1737 // The start of the last object was found in 1745 // The start of the last object was found in
1738 // the current chunk (which has already 1746 // the current region (which has already
1739 // been updated). 1747 // been updated).
1740 assert(bbu.cur_block() == 1748 assert(bbu.cur_block() ==
1741 _summary_data.addr_to_block_idx(last_offset_addr), 1749 _summary_data.addr_to_block_idx(last_offset_addr),
1742 "Should be a block already processed"); 1750 "Should be a block already processed");
1743 } 1751 }
1744 #ifdef ASSERT 1752 #ifdef ASSERT
1745 // Is there enough block information to find this object? 1753 // Is there enough block information to find this object?
1746 // The destination of the chunk has not been set so the 1754 // The destination of the region has not been set so the
1747 // values returned by calc_new_pointer() and 1755 // values returned by calc_new_pointer() and
1748 // block_calc_new_pointer() will only be 1756 // block_calc_new_pointer() will only be
1749 // offsets. But they should agree. 1757 // offsets. But they should agree.
1750 HeapWord* moved_obj_with_chunks = 1758 HeapWord* moved_obj_with_regions =
1751 _summary_data.chunk_calc_new_pointer(last_offset_addr); 1759 _summary_data.region_calc_new_pointer(last_offset_addr);
1752 HeapWord* moved_obj_with_blocks = 1760 HeapWord* moved_obj_with_blocks =
1753 _summary_data.calc_new_pointer(last_offset_addr); 1761 _summary_data.calc_new_pointer(last_offset_addr);
1754 assert(moved_obj_with_chunks == moved_obj_with_blocks, 1762 assert(moved_obj_with_regions == moved_obj_with_blocks,
1755 "Block calculation is wrong"); 1763 "Block calculation is wrong");
1756 #endif 1764 #endif
1757 } else if (last_block < _summary_data.block_count()) { 1765 } else if (last_block < _summary_data.block_count()) {
1758 // Iterations ended looking for a start bit (but 1766 // Iterations ended looking for a start bit (but
1759 // did not run off the end of the block table). 1767 // did not run off the end of the block table).
1762 } 1770 }
1763 } 1771 }
1764 #ifdef ASSERT 1772 #ifdef ASSERT
1765 // Is there enough block information to find this object? 1773 // Is there enough block information to find this object?
1766 HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset); 1774 HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset);
1767 HeapWord* moved_obj_with_chunks = 1775 HeapWord* moved_obj_with_regions =
1768 _summary_data.calc_new_pointer(left_offset_addr); 1776 _summary_data.calc_new_pointer(left_offset_addr);
1769 HeapWord* moved_obj_with_blocks = 1777 HeapWord* moved_obj_with_blocks =
1770 _summary_data.calc_new_pointer(left_offset_addr); 1778 _summary_data.calc_new_pointer(left_offset_addr);
1771 assert(moved_obj_with_chunks == moved_obj_with_blocks, 1779 assert(moved_obj_with_regions == moved_obj_with_blocks,
1772 "Block calculation is wrong"); 1780 "Block calculation is wrong");
1773 #endif 1781 #endif
1774 1782
1775 // Is there another block after the end of this chunk? 1783 // Is there another block after the end of this region?
1776 #ifdef ASSERT 1784 #ifdef ASSERT
1777 if (last_block < _summary_data.block_count()) { 1785 if (last_block < _summary_data.block_count()) {
1778 // No object may have been found in a block. If that 1786 // No object may have been found in a block. If that
1779 // block is at the end of the chunk, the iteration will 1787 // block is at the end of the region, the iteration will
1780 // terminate without incrementing the current block so 1788 // terminate without incrementing the current block so
1781 // that the current block is not the last block in the 1789 // that the current block is not the last block in the
1782 // chunk. That situation precludes asserting that the 1790 // region. That situation precludes asserting that the
1783 // current block is the last block in the chunk. Assert 1791 // current block is the last block in the region. Assert
1784 // the lesser condition that the current block does not 1792 // the lesser condition that the current block does not
1785 // exceed the chunk. 1793 // exceed the region.
1786 assert(_summary_data.block_to_addr(last_block) <= 1794 assert(_summary_data.block_to_addr(last_block) <=
1787 (_summary_data.chunk_to_addr(chunk_index) + 1795 (_summary_data.region_to_addr(region_index) +
1788 ParallelCompactData::ChunkSize), 1796 ParallelCompactData::RegionSize),
1789 "Chunk and block inconsistency"); 1797 "Region and block inconsistency");
1790 assert(last_offset <= right_offset, "Iteration over ran end"); 1798 assert(last_offset <= right_offset, "Iteration over ran end");
1791 } 1799 }
1792 #endif 1800 #endif
1793 } 1801 }
1794 #ifdef ASSERT 1802 #ifdef ASSERT
1795 if (PrintGCDetails && Verbose) { 1803 if (PrintGCDetails && Verbose) {
1796 if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) { 1804 if (_summary_data.region(region_index)->partial_obj_size() == 1) {
1797 size_t first_block = 1805 size_t first_block =
1798 chunk_index / ParallelCompactData::BlocksPerChunk; 1806 region_index / ParallelCompactData::BlocksPerRegion;
1799 gclog_or_tty->print_cr("first_block " PTR_FORMAT 1807 gclog_or_tty->print_cr("first_block " PTR_FORMAT
1800 " _offset " PTR_FORMAT 1808 " _offset " PTR_FORMAT
1801 "_first_is_start_bit %d", 1809 "_first_is_start_bit %d",
1802 first_block, 1810 first_block,
1803 _summary_data.block(first_block)->raw_offset(), 1811 _summary_data.block(first_block)->raw_offset(),
1843 1851
1844 PSParallelCompact::invoke_no_policy(maximum_heap_compaction); 1852 PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
1845 } 1853 }
1846 } 1854 }
1847 1855
1848 bool ParallelCompactData::chunk_contains(size_t chunk_index, HeapWord* addr) { 1856 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
1849 size_t addr_chunk_index = addr_to_chunk_idx(addr); 1857 size_t addr_region_index = addr_to_region_idx(addr);
1850 return chunk_index == addr_chunk_index; 1858 return region_index == addr_region_index;
1851 } 1859 }
1852 1860
1853 bool ParallelCompactData::chunk_contains_block(size_t chunk_index, 1861 bool ParallelCompactData::region_contains_block(size_t region_index,
1854 size_t block_index) { 1862 size_t block_index) {
1855 size_t first_block_in_chunk = chunk_index * BlocksPerChunk; 1863 size_t first_block_in_region = region_index * BlocksPerRegion;
1856 size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1; 1864 size_t last_block_in_region = (region_index + 1) * BlocksPerRegion - 1;
1857 1865
1858 return (first_block_in_chunk <= block_index) && 1866 return (first_block_in_region <= block_index) &&
1859 (block_index <= last_block_in_chunk); 1867 (block_index <= last_block_in_region);
1860 } 1868 }
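Editor's note: since a region spans BlocksPerRegion consecutive blocks, region r covers block indices [r * BlocksPerRegion, (r + 1) * BlocksPerRegion - 1]. A self-contained sketch of that arithmetic, assuming the value 4 (which is what RegionSize / BlockSize works out to for 512-word regions and 128-word blocks):

    #include <cassert>
    #include <cstddef>

    static const size_t kBlocksPerRegion = 4;  // 512-word region / 128-word block

    static bool region_contains_block(size_t region_index, size_t block_index) {
      const size_t first = region_index * kBlocksPerRegion;
      const size_t last  = (region_index + 1) * kBlocksPerRegion - 1;
      return first <= block_index && block_index <= last;
    }

    int main() {
      assert(region_contains_block(2, 8));     // first block of region 2
      assert(region_contains_block(2, 11));    // last block of region 2
      assert(!region_contains_block(2, 12));   // first block of region 3
      return 0;
    }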
1861 1869
1862 // This method contains no policy. You should probably 1870 // This method contains no policy. You should probably
1863 // be calling invoke() instead. 1871 // be calling invoke() instead.
1864 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { 1872 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
2203 EventMark m("1 mark object"); 2211 EventMark m("1 mark object");
2204 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty); 2212 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
2205 2213
2206 ParallelScavengeHeap* heap = gc_heap(); 2214 ParallelScavengeHeap* heap = gc_heap();
2207 uint parallel_gc_threads = heap->gc_task_manager()->workers(); 2215 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2208 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array(); 2216 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2209 ParallelTaskTerminator terminator(parallel_gc_threads, qset); 2217 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
2210 2218
2211 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); 2219 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2212 PSParallelCompact::FollowStackClosure follow_stack_closure(cm); 2220 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2213 2221
2341 2349
2342 gc_heap()->perm_gen()->start_array()->reset(); 2350 gc_heap()->perm_gen()->start_array()->reset();
2343 move_and_update(cm, perm_space_id); 2351 move_and_update(cm, perm_space_id);
2344 } 2352 }
2345 2353
2346 void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q, 2354 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2347 uint parallel_gc_threads) { 2355 uint parallel_gc_threads)
2356 {
2348 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty); 2357 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
2349 2358
2350 const unsigned int task_count = MAX2(parallel_gc_threads, 1U); 2359 const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
2351 for (unsigned int j = 0; j < task_count; j++) { 2360 for (unsigned int j = 0; j < task_count; j++) {
2352 q->enqueue(new DrainStacksCompactionTask()); 2361 q->enqueue(new DrainStacksCompactionTask());
2353 } 2362 }
2354 2363
2355 // Find all chunks that are available (can be filled immediately) and 2364 // Find all regions that are available (can be filled immediately) and
2356 // distribute them to the thread stacks. The iteration is done in reverse 2365 // distribute them to the thread stacks. The iteration is done in reverse
2357 // order (high to low) so the chunks will be removed in ascending order. 2366 // order (high to low) so the regions will be removed in ascending order.
2358 2367
2359 const ParallelCompactData& sd = PSParallelCompact::summary_data(); 2368 const ParallelCompactData& sd = PSParallelCompact::summary_data();
2360 2369
2361 size_t fillable_chunks = 0; // A count for diagnostic purposes. 2370 size_t fillable_regions = 0; // A count for diagnostic purposes.
2362 unsigned int which = 0; // The worker thread number. 2371 unsigned int which = 0; // The worker thread number.
2363 2372
2364 for (unsigned int id = to_space_id; id > perm_space_id; --id) { 2373 for (unsigned int id = to_space_id; id > perm_space_id; --id) {
2365 SpaceInfo* const space_info = _space_info + id; 2374 SpaceInfo* const space_info = _space_info + id;
2366 MutableSpace* const space = space_info->space(); 2375 MutableSpace* const space = space_info->space();
2367 HeapWord* const new_top = space_info->new_top(); 2376 HeapWord* const new_top = space_info->new_top();
2368 2377
2369 const size_t beg_chunk = sd.addr_to_chunk_idx(space_info->dense_prefix()); 2378 const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2370 const size_t end_chunk = sd.addr_to_chunk_idx(sd.chunk_align_up(new_top)); 2379 const size_t end_region =
2371 assert(end_chunk > 0, "perm gen cannot be empty"); 2380 sd.addr_to_region_idx(sd.region_align_up(new_top));
2372 2381 assert(end_region > 0, "perm gen cannot be empty");
2373 for (size_t cur = end_chunk - 1; cur >= beg_chunk; --cur) { 2382
2374 if (sd.chunk(cur)->claim_unsafe()) { 2383 for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
2384 if (sd.region(cur)->claim_unsafe()) {
2375 ParCompactionManager* cm = ParCompactionManager::manager_array(which); 2385 ParCompactionManager* cm = ParCompactionManager::manager_array(which);
2376 cm->save_for_processing(cur); 2386 cm->save_for_processing(cur);
2377 2387
2378 if (TraceParallelOldGCCompactionPhase && Verbose) { 2388 if (TraceParallelOldGCCompactionPhase && Verbose) {
2379 const size_t count_mod_8 = fillable_chunks & 7; 2389 const size_t count_mod_8 = fillable_regions & 7;
2380 if (count_mod_8 == 0) gclog_or_tty->print("fillable: "); 2390 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
2381 gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur); 2391 gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
2382 if (count_mod_8 == 7) gclog_or_tty->cr(); 2392 if (count_mod_8 == 7) gclog_or_tty->cr();
2383 } 2393 }
2384 2394
2385 NOT_PRODUCT(++fillable_chunks;) 2395 NOT_PRODUCT(++fillable_regions;)
2386 2396
2387 // Assign chunks to threads in round-robin fashion. 2397 // Assign regions to threads in round-robin fashion.
2388 if (++which == task_count) { 2398 if (++which == task_count) {
2389 which = 0; 2399 which = 0;
2390 } 2400 }
2391 } 2401 }
2392 } 2402 }
2393 } 2403 }
2394 2404
2395 if (TraceParallelOldGCCompactionPhase) { 2405 if (TraceParallelOldGCCompactionPhase) {
2396 if (Verbose && (fillable_chunks & 7) != 0) gclog_or_tty->cr(); 2406 if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
2397 gclog_or_tty->print_cr("%u initially fillable chunks", fillable_chunks); 2407 gclog_or_tty->print_cr("%u initially fillable regions", fillable_regions);
2398 } 2408 }
2399 } 2409 }
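Editor's note: claimed regions are handed to the worker stacks in round-robin order, so consecutive fillable regions land on different threads. A hypothetical sketch of that distribution, detached from the GC task machinery:

    #include <cstddef>
    #include <vector>

    // Sketch: deal region indices out to task_count stacks, wrapping the
    // worker index exactly as the enqueue loop above does.
    static std::vector<std::vector<size_t> >
    distribute_round_robin(const std::vector<size_t>& regions, size_t task_count) {
      std::vector<std::vector<size_t> > stacks(task_count);
      size_t which = 0;
      for (size_t i = 0; i < regions.size(); ++i) {
        stacks[which].push_back(regions[i]);
        if (++which == task_count) {
          which = 0;
        }
      }
      return stacks;
    }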
2400 2410
2401 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4 2411 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2402 2412
2405 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty); 2415 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
2406 2416
2407 ParallelCompactData& sd = PSParallelCompact::summary_data(); 2417 ParallelCompactData& sd = PSParallelCompact::summary_data();
2408 2418
2409 // Iterate over all the spaces adding tasks for updating 2419 // Iterate over all the spaces adding tasks for updating
2410 // chunks in the dense prefix. Assume that 1 gc thread 2420 // regions in the dense prefix. Assume that 1 gc thread
2411 // will work on opening the gaps and the remaining gc threads 2421 // will work on opening the gaps and the remaining gc threads
2412 // will work on the dense prefix. 2422 // will work on the dense prefix.
2413 SpaceId space_id = old_space_id; 2423 SpaceId space_id = old_space_id;
2414 while (space_id != last_space_id) { 2424 while (space_id != last_space_id) {
2415 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix(); 2425 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2419 // There is no dense prefix for this space. 2429 // There is no dense prefix for this space.
2420 space_id = next_compaction_space_id(space_id); 2430 space_id = next_compaction_space_id(space_id);
2421 continue; 2431 continue;
2422 } 2432 }
2423 2433
2424 // The dense prefix is before this chunk. 2434 // The dense prefix is before this region.
2425 size_t chunk_index_end_dense_prefix = 2435 size_t region_index_end_dense_prefix =
2426 sd.addr_to_chunk_idx(dense_prefix_end); 2436 sd.addr_to_region_idx(dense_prefix_end);
2427 ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix); 2437 RegionData* const dense_prefix_cp =
2438 sd.region(region_index_end_dense_prefix);
2428 assert(dense_prefix_end == space->end() || 2439 assert(dense_prefix_end == space->end() ||
2429 dense_prefix_cp->available() || 2440 dense_prefix_cp->available() ||
2430 dense_prefix_cp->claimed(), 2441 dense_prefix_cp->claimed(),
2431 "The chunk after the dense prefix should always be ready to fill"); 2442 "The region after the dense prefix should always be ready to fill");
2432 2443
2433 size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom()); 2444 size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2434 2445
2435 // Is there dense prefix work? 2446 // Is there dense prefix work?
2436 size_t total_dense_prefix_chunks = 2447 size_t total_dense_prefix_regions =
2437 chunk_index_end_dense_prefix - chunk_index_start; 2448 region_index_end_dense_prefix - region_index_start;
2438 // How many chunks of the dense prefix should be given to 2449 // How many regions of the dense prefix should be given to
2439 // each thread? 2450 // each thread?
2440 if (total_dense_prefix_chunks > 0) { 2451 if (total_dense_prefix_regions > 0) {
2441 uint tasks_for_dense_prefix = 1; 2452 uint tasks_for_dense_prefix = 1;
2442 if (UseParallelDensePrefixUpdate) { 2453 if (UseParallelDensePrefixUpdate) {
2443 if (total_dense_prefix_chunks <= 2454 if (total_dense_prefix_regions <=
2444 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) { 2455 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2445 // Don't over partition. This assumes that 2456 // Don't over partition. This assumes that
2446 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value 2457 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2447 // so there are not many chunks to process. 2458 // so there are not many regions to process.
2448 tasks_for_dense_prefix = parallel_gc_threads; 2459 tasks_for_dense_prefix = parallel_gc_threads;
2449 } else { 2460 } else {
2450 // Over partition 2461 // Over partition
2451 tasks_for_dense_prefix = parallel_gc_threads * 2462 tasks_for_dense_prefix = parallel_gc_threads *
2452 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING; 2463 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2453 } 2464 }
2454 } 2465 }
2455 size_t chunks_per_thread = total_dense_prefix_chunks / 2466 size_t regions_per_thread = total_dense_prefix_regions /
2456 tasks_for_dense_prefix; 2467 tasks_for_dense_prefix;
2457 // Give each thread at least 1 chunk. 2468 // Give each thread at least 1 region.
2458 if (chunks_per_thread == 0) { 2469 if (regions_per_thread == 0) {
2459 chunks_per_thread = 1; 2470 regions_per_thread = 1;
2460 } 2471 }
2461 2472
2462 for (uint k = 0; k < tasks_for_dense_prefix; k++) { 2473 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2463 if (chunk_index_start >= chunk_index_end_dense_prefix) { 2474 if (region_index_start >= region_index_end_dense_prefix) {
2464 break; 2475 break;
2465 } 2476 }
2466 // chunk_index_end is not processed 2477 // region_index_end is not processed
2467 size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread, 2478 size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2468 chunk_index_end_dense_prefix); 2479 region_index_end_dense_prefix);
2469 q->enqueue(new UpdateDensePrefixTask( 2480 q->enqueue(new UpdateDensePrefixTask(
2470 space_id, 2481 space_id,
2471 chunk_index_start, 2482 region_index_start,
2472 chunk_index_end)); 2483 region_index_end));
2473 chunk_index_start = chunk_index_end; 2484 region_index_start = region_index_end;
2474 } 2485 }
2475 } 2486 }
2476 // This gets any part of the dense prefix that did not 2487 // This gets any part of the dense prefix that did not
2477 // fit evenly. 2488 // fit evenly.
2478 if (chunk_index_start < chunk_index_end_dense_prefix) { 2489 if (region_index_start < region_index_end_dense_prefix) {
2479 q->enqueue(new UpdateDensePrefixTask( 2490 q->enqueue(new UpdateDensePrefixTask(
2480 space_id, 2491 space_id,
2481 chunk_index_start, 2492 region_index_start,
2482 chunk_index_end_dense_prefix)); 2493 region_index_end_dense_prefix));
2483 } 2494 }
2484 space_id = next_compaction_space_id(space_id); 2495 space_id = next_compaction_space_id(space_id);
2485 } // End tasks for dense prefix 2496 } // End tasks for dense prefix
2486 } 2497 }
2487 2498
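Editor's note: the dense prefix work above is split into roughly equal ranges of regions, one range per task, with a final task sweeping up whatever did not divide evenly. A sketch of that partitioning with hypothetical names (half-open ranges, end exclusive, assuming tasks >= 1):

    #include <cstddef>
    #include <utility>
    #include <vector>

    static std::vector<std::pair<size_t, size_t> >
    partition_regions(size_t start, size_t end, size_t tasks) {
      std::vector<std::pair<size_t, size_t> > ranges;
      const size_t total = end - start;
      size_t per_task = total / tasks;
      if (per_task == 0) per_task = 1;          // at least one region per task
      for (size_t k = 0; k < tasks && start < end; ++k) {
        size_t range_end = start + per_task;
        if (range_end > end) range_end = end;
        ranges.push_back(std::make_pair(start, range_end));
        start = range_end;
      }
      if (start < end) {                        // the part that did not fit evenly
        ranges.push_back(std::make_pair(start, end));
      }
      return ranges;
    }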
2488 void PSParallelCompact::enqueue_chunk_stealing_tasks( 2499 void PSParallelCompact::enqueue_region_stealing_tasks(
2489 GCTaskQueue* q, 2500 GCTaskQueue* q,
2490 ParallelTaskTerminator* terminator_ptr, 2501 ParallelTaskTerminator* terminator_ptr,
2491 uint parallel_gc_threads) { 2502 uint parallel_gc_threads) {
2492 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty); 2503 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
2493 2504
2494 // Once a thread has drained its stack, it should try to steal chunks from 2505 // Once a thread has drained its stack, it should try to steal regions from
2495 // other threads. 2506 // other threads.
2496 if (parallel_gc_threads > 1) { 2507 if (parallel_gc_threads > 1) {
2497 for (uint j = 0; j < parallel_gc_threads; j++) { 2508 for (uint j = 0; j < parallel_gc_threads; j++) {
2498 q->enqueue(new StealChunkCompactionTask(terminator_ptr)); 2509 q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2499 } 2510 }
2500 } 2511 }
2501 } 2512 }
2502 2513
2503 void PSParallelCompact::compact() { 2514 void PSParallelCompact::compact() {
2508 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); 2519 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2509 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); 2520 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2510 PSOldGen* old_gen = heap->old_gen(); 2521 PSOldGen* old_gen = heap->old_gen();
2511 old_gen->start_array()->reset(); 2522 old_gen->start_array()->reset();
2512 uint parallel_gc_threads = heap->gc_task_manager()->workers(); 2523 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2513 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array(); 2524 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2514 ParallelTaskTerminator terminator(parallel_gc_threads, qset); 2525 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
2515 2526
2516 GCTaskQueue* q = GCTaskQueue::create(); 2527 GCTaskQueue* q = GCTaskQueue::create();
2517 enqueue_chunk_draining_tasks(q, parallel_gc_threads); 2528 enqueue_region_draining_tasks(q, parallel_gc_threads);
2518 enqueue_dense_prefix_tasks(q, parallel_gc_threads); 2529 enqueue_dense_prefix_tasks(q, parallel_gc_threads);
2519 enqueue_chunk_stealing_tasks(q, &terminator, parallel_gc_threads); 2530 enqueue_region_stealing_tasks(q, &terminator, parallel_gc_threads);
2520 2531
2521 { 2532 {
2522 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty); 2533 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
2523 2534
2524 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create(); 2535 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
2530 2541
2531 // We have to release the barrier tasks! 2542 // We have to release the barrier tasks!
2532 WaitForBarrierGCTask::destroy(fin); 2543 WaitForBarrierGCTask::destroy(fin);
2533 2544
2534 #ifdef ASSERT 2545 #ifdef ASSERT
2535 // Verify that all chunks have been processed before the deferred updates. 2546 // Verify that all regions have been processed before the deferred updates.
2536 // Note that perm_space_id is skipped; this type of verification is not 2547 // Note that perm_space_id is skipped; this type of verification is not
2537 // valid until the perm gen is compacted by chunks. 2548 // valid until the perm gen is compacted by regions.
2538 for (unsigned int id = old_space_id; id < last_space_id; ++id) { 2549 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2539 verify_complete(SpaceId(id)); 2550 verify_complete(SpaceId(id));
2540 } 2551 }
2541 #endif 2552 #endif
2542 } 2553 }
2551 } 2562 }
2552 } 2563 }
2553 2564
2554 #ifdef ASSERT 2565 #ifdef ASSERT
2555 void PSParallelCompact::verify_complete(SpaceId space_id) { 2566 void PSParallelCompact::verify_complete(SpaceId space_id) {
2556 // All Chunks between space bottom() to new_top() should be marked as filled 2567 // All Regions between space bottom() to new_top() should be marked as filled
2557 // and all Chunks between new_top() and top() should be available (i.e., 2568 // and all Regions between new_top() and top() should be available (i.e.,
2558 // should have been emptied). 2569 // should have been emptied).
2559 ParallelCompactData& sd = summary_data(); 2570 ParallelCompactData& sd = summary_data();
2560 SpaceInfo si = _space_info[space_id]; 2571 SpaceInfo si = _space_info[space_id];
2561 HeapWord* new_top_addr = sd.chunk_align_up(si.new_top()); 2572 HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2562 HeapWord* old_top_addr = sd.chunk_align_up(si.space()->top()); 2573 HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2563 const size_t beg_chunk = sd.addr_to_chunk_idx(si.space()->bottom()); 2574 const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2564 const size_t new_top_chunk = sd.addr_to_chunk_idx(new_top_addr); 2575 const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2565 const size_t old_top_chunk = sd.addr_to_chunk_idx(old_top_addr); 2576 const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2566 2577
2567 bool issued_a_warning = false; 2578 bool issued_a_warning = false;
2568 2579
2569 size_t cur_chunk; 2580 size_t cur_region;
2570 for (cur_chunk = beg_chunk; cur_chunk < new_top_chunk; ++cur_chunk) { 2581 for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2571 const ChunkData* const c = sd.chunk(cur_chunk); 2582 const RegionData* const c = sd.region(cur_region);
2572 if (!c->completed()) { 2583 if (!c->completed()) {
2573 warning("chunk " SIZE_FORMAT " not filled: " 2584 warning("region " SIZE_FORMAT " not filled: "
2574 "destination_count=" SIZE_FORMAT, 2585 "destination_count=" SIZE_FORMAT,
2575 cur_chunk, c->destination_count()); 2586 cur_region, c->destination_count());
2576 issued_a_warning = true; 2587 issued_a_warning = true;
2577 } 2588 }
2578 } 2589 }
2579 2590
2580 for (cur_chunk = new_top_chunk; cur_chunk < old_top_chunk; ++cur_chunk) { 2591 for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
2581 const ChunkData* const c = sd.chunk(cur_chunk); 2592 const RegionData* const c = sd.region(cur_region);
2582 if (!c->available()) { 2593 if (!c->available()) {
2583 warning("chunk " SIZE_FORMAT " not empty: " 2594 warning("region " SIZE_FORMAT " not empty: "
2584 "destination_count=" SIZE_FORMAT, 2595 "destination_count=" SIZE_FORMAT,
2585 cur_chunk, c->destination_count()); 2596 cur_region, c->destination_count());
2586 issued_a_warning = true; 2597 issued_a_warning = true;
2587 } 2598 }
2588 } 2599 }
2589 2600
2590 if (issued_a_warning) { 2601 if (issued_a_warning) {
2591 print_chunk_ranges(); 2602 print_region_ranges();
2592 } 2603 }
2593 } 2604 }
2594 #endif // #ifdef ASSERT 2605 #endif // #ifdef ASSERT
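Editor's note: verify_complete walks the region table in two passes: every region below new_top must have been filled (completed), and every region between new_top and the old top must have been emptied (available). A hypothetical, self-contained sketch of the same check over an array of region states:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum RegionState { kAvailable, kClaimed, kCompleted };

    static bool verify_complete(const std::vector<RegionState>& regions,
                                size_t new_top_region, size_t old_top_region) {
      bool ok = true;
      for (size_t r = 0; r < new_top_region; ++r) {
        if (regions[r] != kCompleted) {
          std::printf("region %zu not filled\n", r);   // expected to be filled
          ok = false;
        }
      }
      for (size_t r = new_top_region; r < old_top_region; ++r) {
        if (regions[r] != kAvailable) {
          std::printf("region %zu not empty\n", r);    // expected to be emptied
          ok = false;
        }
      }
      return ok;
    }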
2595 2606
2596 void PSParallelCompact::compact_serial(ParCompactionManager* cm) { 2607 void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
2787 2798
2788 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q); 2799 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
2789 } 2800 }
2790 #endif //VALIDATE_MARK_SWEEP 2801 #endif //VALIDATE_MARK_SWEEP
2791 2802
2792 // Update interior oops in the ranges of chunks [beg_chunk, end_chunk). 2803 // Update interior oops in the ranges of regions [beg_region, end_region).
2793 void 2804 void
2794 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm, 2805 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2795 SpaceId space_id, 2806 SpaceId space_id,
2796 size_t beg_chunk, 2807 size_t beg_region,
2797 size_t end_chunk) { 2808 size_t end_region) {
2798 ParallelCompactData& sd = summary_data(); 2809 ParallelCompactData& sd = summary_data();
2799 ParMarkBitMap* const mbm = mark_bitmap(); 2810 ParMarkBitMap* const mbm = mark_bitmap();
2800 2811
2801 HeapWord* beg_addr = sd.chunk_to_addr(beg_chunk); 2812 HeapWord* beg_addr = sd.region_to_addr(beg_region);
2802 HeapWord* const end_addr = sd.chunk_to_addr(end_chunk); 2813 HeapWord* const end_addr = sd.region_to_addr(end_region);
2803 assert(beg_chunk <= end_chunk, "bad chunk range"); 2814 assert(beg_region <= end_region, "bad region range");
2804 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix"); 2815 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2805 2816
2806 #ifdef ASSERT 2817 #ifdef ASSERT
2807 // Claim the chunks to avoid triggering an assert when they are marked as 2818 // Claim the regions to avoid triggering an assert when they are marked as
2808 // filled. 2819 // filled.
2809 for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) { 2820 for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2810 assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed"); 2821 assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2811 } 2822 }
2812 #endif // #ifdef ASSERT 2823 #endif // #ifdef ASSERT
2813 2824
2814 if (beg_addr != space(space_id)->bottom()) { 2825 if (beg_addr != space(space_id)->bottom()) {
2815 // Find the first live object or block of dead space that *starts* in this 2826 // Find the first live object or block of dead space that *starts* in this
2816 // range of chunks. If a partial object crosses onto the chunk, skip it; it 2827 // range of regions. If a partial object crosses onto the region, skip it;
2817 // will be marked for 'deferred update' when the object head is processed. 2828 // it will be marked for 'deferred update' when the object head is
2818 // If dead space crosses onto the chunk, it is also skipped; it will be 2829 // processed. If dead space crosses onto the region, it is also skipped; it
2819 // filled when the prior chunk is processed. If neither of those apply, the 2830 // will be filled when the prior region is processed. If neither of those
2820 // first word in the chunk is the start of a live object or dead space. 2831 // apply, the first word in the region is the start of a live object or dead
2832 // space.
2821 assert(beg_addr > space(space_id)->bottom(), "sanity"); 2833 assert(beg_addr > space(space_id)->bottom(), "sanity");
2822 const ChunkData* const cp = sd.chunk(beg_chunk); 2834 const RegionData* const cp = sd.region(beg_region);
2823 if (cp->partial_obj_size() != 0) { 2835 if (cp->partial_obj_size() != 0) {
2824 beg_addr = sd.partial_obj_end(beg_chunk); 2836 beg_addr = sd.partial_obj_end(beg_region);
2825 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) { 2837 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2826 beg_addr = mbm->find_obj_beg(beg_addr, end_addr); 2838 beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2827 } 2839 }
2828 } 2840 }
2829 2841
2830 if (beg_addr < end_addr) { 2842 if (beg_addr < end_addr) {
2831 // A live object or block of dead space starts in this range of Chunks. 2843 // A live object or block of dead space starts in this range of Regions.
2832 HeapWord* const dense_prefix_end = dense_prefix(space_id); 2844 HeapWord* const dense_prefix_end = dense_prefix(space_id);
2833 2845
2834 // Create closures and iterate. 2846 // Create closures and iterate.
2835 UpdateOnlyClosure update_closure(mbm, cm, space_id); 2847 UpdateOnlyClosure update_closure(mbm, cm, space_id);
2836 FillClosure fill_closure(cm, space_id); 2848 FillClosure fill_closure(cm, space_id);
2840 if (status == ParMarkBitMap::incomplete) { 2852 if (status == ParMarkBitMap::incomplete) {
2841 update_closure.do_addr(update_closure.source()); 2853 update_closure.do_addr(update_closure.source());
2842 } 2854 }
2843 } 2855 }
2844 2856
2845 // Mark the chunks as filled. 2857 // Mark the regions as filled.
2846 ChunkData* const beg_cp = sd.chunk(beg_chunk); 2858 RegionData* const beg_cp = sd.region(beg_region);
2847 ChunkData* const end_cp = sd.chunk(end_chunk); 2859 RegionData* const end_cp = sd.region(end_region);
2848 for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) { 2860 for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2849 cp->set_completed(); 2861 cp->set_completed();
2850 } 2862 }
2851 } 2863 }
2852 2864
2853 // Return the SpaceId for the space containing addr. If addr is not in the 2865 // Return the SpaceId for the space containing addr. If addr is not in the
2875 ObjectStartArray* const start_array = space_info->start_array(); 2887 ObjectStartArray* const start_array = space_info->start_array();
2876 2888
2877 const MutableSpace* const space = space_info->space(); 2889 const MutableSpace* const space = space_info->space();
2878 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set"); 2890 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
2879 HeapWord* const beg_addr = space_info->dense_prefix(); 2891 HeapWord* const beg_addr = space_info->dense_prefix();
2880 HeapWord* const end_addr = sd.chunk_align_up(space_info->new_top()); 2892 HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
2881 2893
2882 const ChunkData* const beg_chunk = sd.addr_to_chunk_ptr(beg_addr); 2894 const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
2883 const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr); 2895 const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
2884 const ChunkData* cur_chunk; 2896 const RegionData* cur_region;
2885 for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) { 2897 for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
2886 HeapWord* const addr = cur_chunk->deferred_obj_addr(); 2898 HeapWord* const addr = cur_region->deferred_obj_addr();
2887 if (addr != NULL) { 2899 if (addr != NULL) {
2888 if (start_array != NULL) { 2900 if (start_array != NULL) {
2889 start_array->allocate_block(addr); 2901 start_array->allocate_block(addr);
2890 } 2902 }
2891 oop(addr)->update_contents(cm); 2903 oop(addr)->update_contents(cm);
2927 return m->bit_to_addr(cur_beg); 2939 return m->bit_to_addr(cur_beg);
2928 } 2940 }
2929 2941
2930 HeapWord* 2942 HeapWord*
2931 PSParallelCompact::first_src_addr(HeapWord* const dest_addr, 2943 PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2932 size_t src_chunk_idx) 2944 size_t src_region_idx)
2933 { 2945 {
2934 ParMarkBitMap* const bitmap = mark_bitmap(); 2946 ParMarkBitMap* const bitmap = mark_bitmap();
2935 const ParallelCompactData& sd = summary_data(); 2947 const ParallelCompactData& sd = summary_data();
2936 const size_t ChunkSize = ParallelCompactData::ChunkSize; 2948 const size_t RegionSize = ParallelCompactData::RegionSize;
2937 2949
2938 assert(sd.is_chunk_aligned(dest_addr), "not aligned"); 2950 assert(sd.is_region_aligned(dest_addr), "not aligned");
2939 2951
2940 const ChunkData* const src_chunk_ptr = sd.chunk(src_chunk_idx); 2952 const RegionData* const src_region_ptr = sd.region(src_region_idx);
2941 const size_t partial_obj_size = src_chunk_ptr->partial_obj_size(); 2953 const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2942 HeapWord* const src_chunk_destination = src_chunk_ptr->destination(); 2954 HeapWord* const src_region_destination = src_region_ptr->destination();
2943 2955
2944 assert(dest_addr >= src_chunk_destination, "wrong src chunk"); 2956 assert(dest_addr >= src_region_destination, "wrong src region");
2945 assert(src_chunk_ptr->data_size() > 0, "src chunk cannot be empty"); 2957 assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2946 2958
2947 HeapWord* const src_chunk_beg = sd.chunk_to_addr(src_chunk_idx); 2959 HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
2948 HeapWord* const src_chunk_end = src_chunk_beg + ChunkSize; 2960 HeapWord* const src_region_end = src_region_beg + RegionSize;
2949 2961
2950 HeapWord* addr = src_chunk_beg; 2962 HeapWord* addr = src_region_beg;
2951 if (dest_addr == src_chunk_destination) { 2963 if (dest_addr == src_region_destination) {
2952 // Return the first live word in the source chunk. 2964 // Return the first live word in the source region.
2953 if (partial_obj_size == 0) { 2965 if (partial_obj_size == 0) {
2954 addr = bitmap->find_obj_beg(addr, src_chunk_end); 2966 addr = bitmap->find_obj_beg(addr, src_region_end);
2955 assert(addr < src_chunk_end, "no objects start in src chunk"); 2967 assert(addr < src_region_end, "no objects start in src region");
2956 } 2968 }
2957 return addr; 2969 return addr;
2958 } 2970 }
2959 2971
2960 // Must skip some live data. 2972 // Must skip some live data.
2961 size_t words_to_skip = dest_addr - src_chunk_destination; 2973 size_t words_to_skip = dest_addr - src_region_destination;
2962 assert(src_chunk_ptr->data_size() > words_to_skip, "wrong src chunk"); 2974 assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2963 2975
2964 if (partial_obj_size >= words_to_skip) { 2976 if (partial_obj_size >= words_to_skip) {
2965 // All the live words to skip are part of the partial object. 2977 // All the live words to skip are part of the partial object.
2966 addr += words_to_skip; 2978 addr += words_to_skip;
2967 if (partial_obj_size == words_to_skip) { 2979 if (partial_obj_size == words_to_skip) {
2968 // Find the first live word past the partial object. 2980 // Find the first live word past the partial object.
2969 addr = bitmap->find_obj_beg(addr, src_chunk_end); 2981 addr = bitmap->find_obj_beg(addr, src_region_end);
2970 assert(addr < src_chunk_end, "wrong src chunk"); 2982 assert(addr < src_region_end, "wrong src region");
2971 } 2983 }
2972 return addr; 2984 return addr;
2973 } 2985 }
2974 2986
2975 // Skip over the partial object (if any). 2987 // Skip over the partial object (if any).
2976 if (partial_obj_size != 0) { 2988 if (partial_obj_size != 0) {
2977 words_to_skip -= partial_obj_size; 2989 words_to_skip -= partial_obj_size;
2978 addr += partial_obj_size; 2990 addr += partial_obj_size;
2979 } 2991 }
2980 2992
2981 // Skip over live words due to objects that start in the chunk. 2993 // Skip over live words due to objects that start in the region.
2982 addr = skip_live_words(addr, src_chunk_end, words_to_skip); 2994 addr = skip_live_words(addr, src_region_end, words_to_skip);
2983 assert(addr < src_chunk_end, "wrong src chunk"); 2995 assert(addr < src_region_end, "wrong src region");
2984 return addr; 2996 return addr;
2985 } 2997 }
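
first_src_addr() above works out how far into the source region copying should resume. A simplified model of that decision, using word offsets instead of HeapWord* and a toy per-word liveness map in place of ParMarkBitMap (both assumptions):

#include <cassert>
#include <cstddef>
#include <vector>

// Toy live-word map for one source region: live[i] is true if word i of the
// region holds live data (an assumption; the real ParMarkBitMap stores
// object-begin/end bit pairs, not per-word liveness).
typedef std::vector<bool> LiveMap;

// First live word at or after 'beg' (stand-in for ParMarkBitMap::find_obj_beg).
static size_t find_live(const LiveMap& live, size_t beg) {
  while (beg < live.size() && !live[beg]) ++beg;
  return beg;
}

// Word offset within the source region of the first word to copy, given the
// size of the partial object entering the region and the number of live words
// already copied to earlier destination regions (words_to_skip).
size_t first_src_offset(const LiveMap& live,
                        size_t partial_obj_size,
                        size_t words_to_skip) {
  if (words_to_skip == 0) {
    // dest_addr equals the region's destination: start at the first live word.
    return partial_obj_size == 0 ? find_live(live, 0) : 0;
  }
  if (partial_obj_size >= words_to_skip) {
    // Everything to skip lies inside the partial object.
    size_t off = words_to_skip;
    return partial_obj_size == words_to_skip ? find_live(live, off) : off;
  }
  // Skip the whole partial object, then skip live words one at a time
  // (stand-in for skip_live_words()).
  size_t off = partial_obj_size;
  for (size_t rest = words_to_skip - partial_obj_size; rest > 0; --rest) {
    off = find_live(live, off) + 1;
  }
  return find_live(live, off);
}

int main() {
  LiveMap live(16, false);
  live[0] = live[1] = live[2] = true;  // 3-word partial object entering the region
  live[6] = live[7] = true;            // first object that starts in the region
  assert(first_src_offset(live, 3, 0) == 0);  // copy from the region start
  assert(first_src_offset(live, 3, 2) == 2);  // resume inside the partial object
  assert(first_src_offset(live, 3, 3) == 6);  // just past it: next live word
  return 0;
}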
2986 2998
2987 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm, 2999 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2988 size_t beg_chunk, 3000 size_t beg_region,
2989 HeapWord* end_addr) 3001 HeapWord* end_addr)
2990 { 3002 {
2991 ParallelCompactData& sd = summary_data(); 3003 ParallelCompactData& sd = summary_data();
2992 ChunkData* const beg = sd.chunk(beg_chunk); 3004 RegionData* const beg = sd.region(beg_region);
2993 HeapWord* const end_addr_aligned_up = sd.chunk_align_up(end_addr); 3005 HeapWord* const end_addr_aligned_up = sd.region_align_up(end_addr);
2994 ChunkData* const end = sd.addr_to_chunk_ptr(end_addr_aligned_up); 3006 RegionData* const end = sd.addr_to_region_ptr(end_addr_aligned_up);
2995 size_t cur_idx = beg_chunk; 3007 size_t cur_idx = beg_region;
2996 for (ChunkData* cur = beg; cur < end; ++cur, ++cur_idx) { 3008 for (RegionData* cur = beg; cur < end; ++cur, ++cur_idx) {
2997 assert(cur->data_size() > 0, "chunk must have live data"); 3009 assert(cur->data_size() > 0, "region must have live data");
2998 cur->decrement_destination_count(); 3010 cur->decrement_destination_count();
2999 if (cur_idx <= cur->source_chunk() && cur->available() && cur->claim()) { 3011 if (cur_idx <= cur->source_region() && cur->available() && cur->claim()) {
3000 cm->save_for_processing(cur_idx); 3012 cm->save_for_processing(cur_idx);
3001 } 3013 }
3002 } 3014 }
3003 } 3015 }
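
decrement_destination_counts() is where finished destinations hand source regions back as new work: once no destination still needs to read from a source region, and that region is itself ready, exactly one thread claims it. A minimal sketch of the count-and-claim idea, using std::atomic and a plain vector as the work queue (assumptions; the real RegionData packs the count and claim bit into a single field and also checks source_region() before claiming):

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <vector>

// Simplified region record: only the pieces that matter for the hand-off.
struct ToyRegion {
  std::atomic<unsigned> destination_count;  // regions still copying out of us
  std::atomic<bool>     claimed;            // has someone queued us already?
  ToyRegion() : destination_count(0), claimed(false) {}

  bool available() const { return destination_count.load() == 0; }
  bool claim() {
    bool expected = false;
    return claimed.compare_exchange_strong(expected, true);
  }
};

// Called by a thread that has finished reading from source regions [beg, end).
void release_sources(std::vector<ToyRegion>& regions, size_t beg, size_t end,
                     std::vector<size_t>& work_queue) {
  for (size_t i = beg; i < end; ++i) {
    regions[i].destination_count.fetch_sub(1);
    if (regions[i].available() && regions[i].claim()) {
      work_queue.push_back(i);  // stand-in for cm->save_for_processing(i)
    }
  }
}

int main() {
  std::vector<ToyRegion> regions(4);
  regions[1].destination_count = 1;  // one destination still reads from it
  regions[2].destination_count = 2;  // two destinations still read from it
  std::vector<size_t> queue;
  release_sources(regions, 1, 3, queue);  // one destination finishes with both
  printf("queued %zu region(s); first is region %zu\n", queue.size(), queue[0]);
  return 0;
}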
3004 3016
3005 size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure, 3017 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
3006 SpaceId& src_space_id, 3018 SpaceId& src_space_id,
3007 HeapWord*& src_space_top, 3019 HeapWord*& src_space_top,
3008 HeapWord* end_addr) 3020 HeapWord* end_addr)
3009 { 3021 {
3010 typedef ParallelCompactData::ChunkData ChunkData; 3022 typedef ParallelCompactData::RegionData RegionData;
3011 3023
3012 ParallelCompactData& sd = PSParallelCompact::summary_data(); 3024 ParallelCompactData& sd = PSParallelCompact::summary_data();
3013 const size_t chunk_size = ParallelCompactData::ChunkSize; 3025 const size_t region_size = ParallelCompactData::RegionSize;
3014 3026
3015 size_t src_chunk_idx = 0; 3027 size_t src_region_idx = 0;
3016 3028
3017 // Skip empty chunks (if any) up to the top of the space. 3029 // Skip empty regions (if any) up to the top of the space.
3018 HeapWord* const src_aligned_up = sd.chunk_align_up(end_addr); 3030 HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
3019 ChunkData* src_chunk_ptr = sd.addr_to_chunk_ptr(src_aligned_up); 3031 RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
3020 HeapWord* const top_aligned_up = sd.chunk_align_up(src_space_top); 3032 HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
3021 const ChunkData* const top_chunk_ptr = sd.addr_to_chunk_ptr(top_aligned_up); 3033 const RegionData* const top_region_ptr =
3022 while (src_chunk_ptr < top_chunk_ptr && src_chunk_ptr->data_size() == 0) { 3034 sd.addr_to_region_ptr(top_aligned_up);
3023 ++src_chunk_ptr; 3035 while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
3024 } 3036 ++src_region_ptr;
3025 3037 }
3026 if (src_chunk_ptr < top_chunk_ptr) { 3038
3027 // The next source chunk is in the current space. Update src_chunk_idx and 3039 if (src_region_ptr < top_region_ptr) {
3028 // the source address to match src_chunk_ptr. 3040 // The next source region is in the current space. Update src_region_idx
3029 src_chunk_idx = sd.chunk(src_chunk_ptr); 3041 // and the source address to match src_region_ptr.
3030 HeapWord* const src_chunk_addr = sd.chunk_to_addr(src_chunk_idx); 3042 src_region_idx = sd.region(src_region_ptr);
3031 if (src_chunk_addr > closure.source()) { 3043 HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
3032 closure.set_source(src_chunk_addr); 3044 if (src_region_addr > closure.source()) {
3033 } 3045 closure.set_source(src_region_addr);
3034 return src_chunk_idx; 3046 }
3035 } 3047 return src_region_idx;
3036 3048 }
3037 // Switch to a new source space and find the first non-empty chunk. 3049
3050 // Switch to a new source space and find the first non-empty region.
3038 unsigned int space_id = src_space_id + 1; 3051 unsigned int space_id = src_space_id + 1;
3039 assert(space_id < last_space_id, "not enough spaces"); 3052 assert(space_id < last_space_id, "not enough spaces");
3040 3053
3041 HeapWord* const destination = closure.destination(); 3054 HeapWord* const destination = closure.destination();
3042 3055
3043 do { 3056 do {
3044 MutableSpace* space = _space_info[space_id].space(); 3057 MutableSpace* space = _space_info[space_id].space();
3045 HeapWord* const bottom = space->bottom(); 3058 HeapWord* const bottom = space->bottom();
3046 const ChunkData* const bottom_cp = sd.addr_to_chunk_ptr(bottom); 3059 const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
3047 3060
3048 // Iterate over the spaces that do not compact into themselves. 3061 // Iterate over the spaces that do not compact into themselves.
3049 if (bottom_cp->destination() != bottom) { 3062 if (bottom_cp->destination() != bottom) {
3050 HeapWord* const top_aligned_up = sd.chunk_align_up(space->top()); 3063 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
3051 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up); 3064 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
3052 3065
3053 for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) { 3066 for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
3054 if (src_cp->live_obj_size() > 0) { 3067 if (src_cp->live_obj_size() > 0) {
3055 // Found it. 3068 // Found it.
3056 assert(src_cp->destination() == destination, 3069 assert(src_cp->destination() == destination,
3057 "first live obj in the space must match the destination"); 3070 "first live obj in the space must match the destination");
3058 assert(src_cp->partial_obj_size() == 0, 3071 assert(src_cp->partial_obj_size() == 0,
3059 "a space cannot begin with a partial obj"); 3072 "a space cannot begin with a partial obj");
3060 3073
3061 src_space_id = SpaceId(space_id); 3074 src_space_id = SpaceId(space_id);
3062 src_space_top = space->top(); 3075 src_space_top = space->top();
3063 const size_t src_chunk_idx = sd.chunk(src_cp); 3076 const size_t src_region_idx = sd.region(src_cp);
3064 closure.set_source(sd.chunk_to_addr(src_chunk_idx)); 3077 closure.set_source(sd.region_to_addr(src_region_idx));
3065 return src_chunk_idx; 3078 return src_region_idx;
3066 } else { 3079 } else {
3067 assert(src_cp->data_size() == 0, "sanity"); 3080 assert(src_cp->data_size() == 0, "sanity");
3068 } 3081 }
3069 } 3082 }
3070 } 3083 }
3071 } while (++space_id < last_space_id); 3084 } while (++space_id < last_space_id);
3072 3085
3073 assert(false, "no source chunk was found"); 3086 assert(false, "no source region was found");
3074 return 0; 3087 return 0;
3075 } 3088 }
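
next_src_region() searches for the next live data to copy: first the remainder of the current source space, then the later spaces. A condensed sketch of that search order, assuming each space is just a vector of per-region live-word counts and ignoring the check that skips spaces which compact into themselves:

#include <cstddef>
#include <cstdio>
#include <vector>

typedef std::vector<size_t> Space;  // data_size() per region (assumption)

// Find the next region with live data, starting at region 'beg' of space
// 'space_id'; returns false if no live data remains anywhere.
bool next_source(const std::vector<Space>& spaces,
                 size_t& space_id, size_t beg, size_t& region_idx) {
  // Skip empty regions up to the top of the current space.
  for (size_t r = beg; r < spaces[space_id].size(); ++r) {
    if (spaces[space_id][r] > 0) { region_idx = r; return true; }
  }
  // Switch to a new source space and find its first non-empty region.
  while (++space_id < spaces.size()) {
    for (size_t r = 0; r < spaces[space_id].size(); ++r) {
      if (spaces[space_id][r] > 0) { region_idx = r; return true; }
    }
  }
  return false;  // the real code asserts here: a source region must exist
}

int main() {
  // Current space has no live data left; the next space starts with an
  // empty region followed by two regions with live words.
  std::vector<Space> spaces = { {0, 0}, {0, 120, 512} };
  size_t space_id = 0, region_idx = 0;
  if (next_source(spaces, space_id, 0, region_idx)) {
    printf("next source: space %zu, region %zu\n", space_id, region_idx);
  }
  return 0;
}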
3076 3089
3077 void PSParallelCompact::fill_chunk(ParCompactionManager* cm, size_t chunk_idx) 3090 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
3078 { 3091 {
3079 typedef ParMarkBitMap::IterationStatus IterationStatus; 3092 typedef ParMarkBitMap::IterationStatus IterationStatus;
3080 const size_t ChunkSize = ParallelCompactData::ChunkSize; 3093 const size_t RegionSize = ParallelCompactData::RegionSize;
3081 ParMarkBitMap* const bitmap = mark_bitmap(); 3094 ParMarkBitMap* const bitmap = mark_bitmap();
3082 ParallelCompactData& sd = summary_data(); 3095 ParallelCompactData& sd = summary_data();
3083 ChunkData* const chunk_ptr = sd.chunk(chunk_idx); 3096 RegionData* const region_ptr = sd.region(region_idx);
3084 3097
3085 // Get the items needed to construct the closure. 3098 // Get the items needed to construct the closure.
3086 HeapWord* dest_addr = sd.chunk_to_addr(chunk_idx); 3099 HeapWord* dest_addr = sd.region_to_addr(region_idx);
3087 SpaceId dest_space_id = space_id(dest_addr); 3100 SpaceId dest_space_id = space_id(dest_addr);
3088 ObjectStartArray* start_array = _space_info[dest_space_id].start_array(); 3101 ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
3089 HeapWord* new_top = _space_info[dest_space_id].new_top(); 3102 HeapWord* new_top = _space_info[dest_space_id].new_top();
3090 assert(dest_addr < new_top, "sanity"); 3103 assert(dest_addr < new_top, "sanity");
3091 const size_t words = MIN2(pointer_delta(new_top, dest_addr), ChunkSize); 3104 const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
3092 3105
3093 // Get the source chunk and related info. 3106 // Get the source region and related info.
3094 size_t src_chunk_idx = chunk_ptr->source_chunk(); 3107 size_t src_region_idx = region_ptr->source_region();
3095 SpaceId src_space_id = space_id(sd.chunk_to_addr(src_chunk_idx)); 3108 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
3096 HeapWord* src_space_top = _space_info[src_space_id].space()->top(); 3109 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
3097 3110
3098 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words); 3111 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3099 closure.set_source(first_src_addr(dest_addr, src_chunk_idx)); 3112 closure.set_source(first_src_addr(dest_addr, src_region_idx));
3100 3113
3101 // Adjust src_chunk_idx to prepare for decrementing destination counts (the 3114 // Adjust src_region_idx to prepare for decrementing destination counts (the
3102 // destination count is not decremented when a chunk is copied to itself). 3115 // destination count is not decremented when a region is copied to itself).
3103 if (src_chunk_idx == chunk_idx) { 3116 if (src_region_idx == region_idx) {
3104 src_chunk_idx += 1; 3117 src_region_idx += 1;
3105 } 3118 }
3106 3119
3107 if (bitmap->is_unmarked(closure.source())) { 3120 if (bitmap->is_unmarked(closure.source())) {
3108 // The first source word is in the middle of an object; copy the remainder 3121 // The first source word is in the middle of an object; copy the remainder
3109 // of the object or as much as will fit. The fact that pointer updates were 3122 // of the object or as much as will fit. The fact that pointer updates were
3110 // deferred will be noted when the object header is processed. 3123 // deferred will be noted when the object header is processed.
3111 HeapWord* const old_src_addr = closure.source(); 3124 HeapWord* const old_src_addr = closure.source();
3112 closure.copy_partial_obj(); 3125 closure.copy_partial_obj();
3113 if (closure.is_full()) { 3126 if (closure.is_full()) {
3114 decrement_destination_counts(cm, src_chunk_idx, closure.source()); 3127 decrement_destination_counts(cm, src_region_idx, closure.source());
3115 chunk_ptr->set_deferred_obj_addr(NULL); 3128 region_ptr->set_deferred_obj_addr(NULL);
3116 chunk_ptr->set_completed(); 3129 region_ptr->set_completed();
3117 return; 3130 return;
3118 } 3131 }
3119 3132
3120 HeapWord* const end_addr = sd.chunk_align_down(closure.source()); 3133 HeapWord* const end_addr = sd.region_align_down(closure.source());
3121 if (sd.chunk_align_down(old_src_addr) != end_addr) { 3134 if (sd.region_align_down(old_src_addr) != end_addr) {
3122 // The partial object was copied from more than one source chunk. 3135 // The partial object was copied from more than one source region.
3123 decrement_destination_counts(cm, src_chunk_idx, end_addr); 3136 decrement_destination_counts(cm, src_region_idx, end_addr);
3124 3137
3125 // Move to the next source chunk, possibly switching spaces as well. All 3138 // Move to the next source region, possibly switching spaces as well. All
3126 // args except end_addr may be modified. 3139 // args except end_addr may be modified.
3127 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top, 3140 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
3128 end_addr); 3141 end_addr);
3129 } 3142 }
3130 } 3143 }
3131 3144
3132 do { 3145 do {
3133 HeapWord* const cur_addr = closure.source(); 3146 HeapWord* const cur_addr = closure.source();
3134 HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1), 3147 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
3135 src_space_top); 3148 src_space_top);
3136 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr); 3149 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
3137 3150
3138 if (status == ParMarkBitMap::incomplete) { 3151 if (status == ParMarkBitMap::incomplete) {
3139 // The last obj that starts in the source chunk does not end in the chunk. 3152 // The last obj that starts in the source region does not end in the
3153 // region.
3140 assert(closure.source() < end_addr, "sanity") 3154 assert(closure.source() < end_addr, "sanity")
3141 HeapWord* const obj_beg = closure.source(); 3155 HeapWord* const obj_beg = closure.source();
3142 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(), 3156 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
3143 src_space_top); 3157 src_space_top);
3144 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end); 3158 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
3153 } 3167 }
3154 } 3168 }
3155 3169
3156 if (status == ParMarkBitMap::would_overflow) { 3170 if (status == ParMarkBitMap::would_overflow) {
3157 // The last object did not fit. Note that interior oop updates were 3171 // The last object did not fit. Note that interior oop updates were
3158 // deferred, then copy enough of the object to fill the chunk. 3172 // deferred, then copy enough of the object to fill the region.
3159 chunk_ptr->set_deferred_obj_addr(closure.destination()); 3173 region_ptr->set_deferred_obj_addr(closure.destination());
3160 status = closure.copy_until_full(); // copies from closure.source() 3174 status = closure.copy_until_full(); // copies from closure.source()
3161 3175
3162 decrement_destination_counts(cm, src_chunk_idx, closure.source()); 3176 decrement_destination_counts(cm, src_region_idx, closure.source());
3163 chunk_ptr->set_completed(); 3177 region_ptr->set_completed();
3164 return; 3178 return;
3165 } 3179 }
3166 3180
3167 if (status == ParMarkBitMap::full) { 3181 if (status == ParMarkBitMap::full) {
3168 decrement_destination_counts(cm, src_chunk_idx, closure.source()); 3182 decrement_destination_counts(cm, src_region_idx, closure.source());
3169 chunk_ptr->set_deferred_obj_addr(NULL); 3183 region_ptr->set_deferred_obj_addr(NULL);
3170 chunk_ptr->set_completed(); 3184 region_ptr->set_completed();
3171 return; 3185 return;
3172 } 3186 }
3173 3187
3174 decrement_destination_counts(cm, src_chunk_idx, end_addr); 3188 decrement_destination_counts(cm, src_region_idx, end_addr);
3175 3189
3176 // Move to the next source chunk, possibly switching spaces as well. All 3190 // Move to the next source region, possibly switching spaces as well. All
3177 // args except end_addr may be modified. 3191 // args except end_addr may be modified.
3178 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top, 3192 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
3179 end_addr); 3193 end_addr);
3180 } while (true); 3194 } while (true);
3181 } 3195 }
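
Two small computations in fill_region() are easy to miss: the destination region only receives words up to new_top, and a region that is its own source starts releasing source regions at the following index. A sketch of both in word offsets, assuming 512-word regions:

#include <algorithm>
#include <cassert>
#include <cstddef>

const size_t RegionSize = 512;  // words per region (assumption: Log2RegionSize == 9)

// Words to copy into the destination region starting at dest_off, given that
// live data ends at new_top_off (both measured in words from the space bottom).
size_t words_to_fill(size_t dest_off, size_t new_top_off) {
  assert(dest_off < new_top_off);
  return std::min(new_top_off - dest_off, RegionSize);
}

// The destination count of a region is not decremented when it is copied to
// itself, so decrementing starts at the following region in that case.
size_t first_source_to_release(size_t src_region_idx, size_t region_idx) {
  return src_region_idx == region_idx ? src_region_idx + 1 : src_region_idx;
}

int main() {
  assert(words_to_fill(0, 5000) == 512);     // full region below new_top
  assert(words_to_fill(4608, 5000) == 392);  // last, partially filled region
  assert(first_source_to_release(7, 7) == 8);
  return 0;
}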
3182 3196
3183 void 3197 void
3184 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) { 3198 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
3206 bitmap->iterate(&reset_objects, beg_addr, end_addr); 3220 bitmap->iterate(&reset_objects, beg_addr, end_addr);
3207 return; 3221 return;
3208 } 3222 }
3209 #endif 3223 #endif
3210 3224
3211 const size_t beg_chunk = sd.addr_to_chunk_idx(beg_addr); 3225 const size_t beg_region = sd.addr_to_region_idx(beg_addr);
3212 const size_t dp_chunk = sd.addr_to_chunk_idx(dp_addr); 3226 const size_t dp_region = sd.addr_to_region_idx(dp_addr);
3213 if (beg_chunk < dp_chunk) { 3227 if (beg_region < dp_region) {
3214 update_and_deadwood_in_dense_prefix(cm, space_id, beg_chunk, dp_chunk); 3228 update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
3215 } 3229 }
3216 3230
3217 // The destination of the first live object that starts in the chunk is one 3231 // The destination of the first live object that starts in the region is one
3218 // past the end of the partial object entering the chunk (if any). 3232 // past the end of the partial object entering the region (if any).
3219 HeapWord* const dest_addr = sd.partial_obj_end(dp_chunk); 3233 HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
3220 HeapWord* const new_top = _space_info[space_id].new_top(); 3234 HeapWord* const new_top = _space_info[space_id].new_top();
3221 assert(new_top >= dest_addr, "bad new_top value"); 3235 assert(new_top >= dest_addr, "bad new_top value");
3222 const size_t words = pointer_delta(new_top, dest_addr); 3236 const size_t words = pointer_delta(new_top, dest_addr);
3223 3237
3224 if (words > 0) { 3238 if (words > 0) {
3325 return ParMarkBitMap::incomplete; 3339 return ParMarkBitMap::incomplete;
3326 } 3340 }
3327 3341
3328 BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm, 3342 BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm,
3329 ParCompactionManager* cm, 3343 ParCompactionManager* cm,
3330 size_t chunk_index) : 3344 size_t region_index) :
3331 ParMarkBitMapClosure(mbm, cm), 3345 ParMarkBitMapClosure(mbm, cm),
3332 _live_data_left(0), 3346 _live_data_left(0),
3333 _cur_block(0) { 3347 _cur_block(0) {
3334 _chunk_start = 3348 _region_start =
3335 PSParallelCompact::summary_data().chunk_to_addr(chunk_index); 3349 PSParallelCompact::summary_data().region_to_addr(region_index);
3336 _chunk_end = 3350 _region_end =
3337 PSParallelCompact::summary_data().chunk_to_addr(chunk_index) + 3351 PSParallelCompact::summary_data().region_to_addr(region_index) +
3338 ParallelCompactData::ChunkSize; 3352 ParallelCompactData::RegionSize;
3339 _chunk_index = chunk_index; 3353 _region_index = region_index;
3340 _cur_block = 3354 _cur_block =
3341 PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start); 3355 PSParallelCompact::summary_data().addr_to_block_idx(_region_start);
3342 } 3356 }
3343 3357
3344 bool BitBlockUpdateClosure::chunk_contains_cur_block() { 3358 bool BitBlockUpdateClosure::region_contains_cur_block() {
3345 return ParallelCompactData::chunk_contains_block(_chunk_index, _cur_block); 3359 return ParallelCompactData::region_contains_block(_region_index, _cur_block);
3346 } 3360 }
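
region_contains_cur_block() comes down to index arithmetic between the two granularities. A standalone sketch, assuming the 512-word regions and 128-word blocks defined at the top of this file, so four blocks per region:

#include <cassert>
#include <cstddef>

const size_t Log2RegionSize  = 9;  // 512 words (assumption)
const size_t Log2BlockSize   = 7;  // 128 words (assumption)
const size_t BlocksPerRegion = (size_t)1 << (Log2RegionSize - Log2BlockSize);

size_t addr_to_region_idx(size_t word_offset) { return word_offset >> Log2RegionSize; }
size_t addr_to_block_idx(size_t word_offset)  { return word_offset >> Log2BlockSize; }

// A region contains a block if the block index, scaled down to region
// granularity, equals the region index.
bool region_contains_block(size_t region_idx, size_t block_idx) {
  return (block_idx >> (Log2RegionSize - Log2BlockSize)) == region_idx;
}

int main() {
  const size_t word = 1000;                  // some word offset into the space
  size_t region = addr_to_region_idx(word);  // 1000 / 512 = 1
  size_t block  = addr_to_block_idx(word);   // 1000 / 128 = 7
  assert(BlocksPerRegion == 4);
  assert(region_contains_block(region, block));       // block 7 lies in region 1
  assert(!region_contains_block(region, block + 1));  // block 8 starts region 2
  return 0;
}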
3347 3361
3348 void BitBlockUpdateClosure::reset_chunk(size_t chunk_index) { 3362 void BitBlockUpdateClosure::reset_region(size_t region_index) {
3349 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);) 3363 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);)
3350 ParallelCompactData& sd = PSParallelCompact::summary_data(); 3364 ParallelCompactData& sd = PSParallelCompact::summary_data();
3351 _chunk_index = chunk_index; 3365 _region_index = region_index;
3352 _live_data_left = 0; 3366 _live_data_left = 0;
3353 _chunk_start = sd.chunk_to_addr(chunk_index); 3367 _region_start = sd.region_to_addr(region_index);
3354 _chunk_end = sd.chunk_to_addr(chunk_index) + ParallelCompactData::ChunkSize; 3368 _region_end = sd.region_to_addr(region_index) + ParallelCompactData::RegionSize;
3355 3369
3356 // The first block in this chunk 3370 // The first block in this region
3357 size_t first_block = sd.addr_to_block_idx(_chunk_start); 3371 size_t first_block = sd.addr_to_block_idx(_region_start);
3358 size_t partial_live_size = sd.chunk(chunk_index)->partial_obj_size(); 3372 size_t partial_live_size = sd.region(region_index)->partial_obj_size();
3359 3373
3360 // Set the offset to 0. By definition it should have that value 3374 // Set the offset to 0. By definition it should have that value
3361 // but it may have been written while processing an earlier chunk. 3375 // but it may have been written while processing an earlier region.
3362 if (partial_live_size == 0) { 3376 if (partial_live_size == 0) {
3363 // No live object extends onto the chunk. The first bit 3377 // No live object extends onto the region. The first bit
3364 // in the bit map for the first chunk must be a start bit. 3378 // in the bit map for the first region must be a start bit.
3365 // Although there may not be any marked bits, it is safe 3379 // Although there may not be any marked bits, it is safe
3366 // to set it as a start bit. 3380 // to set it as a start bit.
3367 sd.block(first_block)->set_start_bit_offset(0); 3381 sd.block(first_block)->set_start_bit_offset(0);
3368 sd.block(first_block)->set_first_is_start_bit(true); 3382 sd.block(first_block)->set_first_is_start_bit(true);
3369 } else if (sd.partial_obj_ends_in_block(first_block)) { 3383 } else if (sd.partial_obj_ends_in_block(first_block)) {
3411 // add the size to the block data. 3425 // add the size to the block data.
3412 HeapWord* obj = addr; 3426 HeapWord* obj = addr;
3413 ParallelCompactData& sd = PSParallelCompact::summary_data(); 3427 ParallelCompactData& sd = PSParallelCompact::summary_data();
3414 3428
3415 assert(bitmap()->obj_size(obj) == words, "bad size"); 3429 assert(bitmap()->obj_size(obj) == words, "bad size");
3416 assert(_chunk_start <= obj, "object is not in chunk"); 3430 assert(_region_start <= obj, "object is not in region");
3417 assert(obj + words <= _chunk_end, "object is not in chunk"); 3431 assert(obj + words <= _region_end, "object is not in region");
3418 3432
3419 // Update the live data to the left 3433 // Update the live data to the left
3420 size_t prev_live_data_left = _live_data_left; 3434 size_t prev_live_data_left = _live_data_left;
3421 _live_data_left = _live_data_left + words; 3435 _live_data_left = _live_data_left + words;
3422 3436
3430 // No object crossed the block boundary and this object was found 3444 // No object crossed the block boundary and this object was found
3431 // on the other side of the block boundary. Update the offset for 3445 // on the other side of the block boundary. Update the offset for
3432 // the new block with the data size that does not include this object. 3446 // the new block with the data size that does not include this object.
3433 // 3447 //
3434 // The first bit in block_of_obj is a start bit except in the 3448 // The first bit in block_of_obj is a start bit except in the
3435 // case where the partial object for the chunk extends into 3449 // case where the partial object for the region extends into
3436 // this block. 3450 // this block.
3437 if (sd.partial_obj_ends_in_block(block_of_obj)) { 3451 if (sd.partial_obj_ends_in_block(block_of_obj)) {
3438 sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left); 3452 sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left);
3439 } else { 3453 } else {
3440 sd.block(block_of_obj)->set_start_bit_offset(prev_live_data_left); 3454 sd.block(block_of_obj)->set_start_bit_offset(prev_live_data_left);
3447 // the block where the object ends 3461 // the block where the object ends
3448 // 3462 //
3449 // The offset for blocks with no objects starting in them 3463 // The offset for blocks with no objects starting in them
3450 // (e.g., blocks between _cur_block and block_of_obj_last) 3464 // (e.g., blocks between _cur_block and block_of_obj_last)
3451 // should not be needed. 3465 // should not be needed.
3452 // Note that block_of_obj_last may be in another chunk. If so, 3466 // Note that block_of_obj_last may be in another region. If so,
3453 // it should be overwritten later. This is a problem (writing 3467 // it should be overwritten later. This is a problem (writing
3454 // into a block in a later chunk) for parallel execution. 3468 // into a block in a later region) for parallel execution.
3455 assert(obj < block_of_obj_last_addr, 3469 assert(obj < block_of_obj_last_addr,
3456 "Object should start in previous block"); 3470 "Object should start in previous block");
3457 3471
3458 // obj is crossing into block_of_obj_last so the first bit 3472 // obj is crossing into block_of_obj_last so the first bit
3459 // is an end bit. 3473 // is an end bit.
3483 _cur_block = block_of_obj_last; 3497 _cur_block = block_of_obj_last;
3484 } 3498 }
3485 } 3499 }
3486 3500
3487 // Return incomplete if there are more blocks to be done. 3501 // Return incomplete if there are more blocks to be done.
3488 if (chunk_contains_cur_block()) { 3502 if (region_contains_cur_block()) {
3489 return ParMarkBitMap::incomplete; 3503 return ParMarkBitMap::incomplete;
3490 } 3504 }
3491 return ParMarkBitMap::complete; 3505 return ParMarkBitMap::complete;
3492 } 3506 }
3493 3507