comparison src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp @ 376:0166ac265d53

6729594: par compact - remove unused block table implementation Reviewed-by: tonyp, jmasa, apetrusenko
author jcoomes
date Tue, 30 Sep 2008 13:15:27 -0700
parents 81cd571500b0
children 7d7a7c599c17
comparison
equal deleted inserted replaced
375:81cd571500b0 376:0166ac265d53
85 // Mask for the bits in a pointer to get an offset within a region. 85 // Mask for the bits in a pointer to get an offset within a region.
86 static const size_t RegionAddrOffsetMask; 86 static const size_t RegionAddrOffsetMask;
87 // Mask for the bits in a pointer to get the address of the start of a region. 87 // Mask for the bits in a pointer to get the address of the start of a region.
88 static const size_t RegionAddrMask; 88 static const size_t RegionAddrMask;
89 89
90 static const size_t Log2BlockSize;
91 static const size_t BlockSize;
92 static const size_t BlockOffsetMask;
93 static const size_t BlockMask;
94
95 static const size_t BlocksPerRegion;
96
97 class RegionData 90 class RegionData
98 { 91 {
99 public: 92 public:
100 // Destination address of the region. 93 // Destination address of the region.
101 HeapWord* destination() const { return _destination; } 94 HeapWord* destination() const { return _destination; }
214 uint _pushed; // 0 until region is pushed onto a worker's stack 207 uint _pushed; // 0 until region is pushed onto a worker's stack
215 private: 208 private:
216 #endif 209 #endif
217 }; 210 };
218 211
219 // 'Blocks' allow shorter sections of the bitmap to be searched. Each Block
220 // holds an offset, which is the amount of live data in the Region to the left
221 // of the first live object in the Block. This amount of live data will
222 // include any object extending into the block. The first block in
223 // a region does not include any partial object extending into
224 // the region.
225 //
226 // The offset also encodes the
227 // 'parity' of the first 1 bit in the Block: a positive offset means the
228 // first 1 bit marks the start of an object, a negative offset means the first
229 // 1 bit marks the end of an object.
230 class BlockData
231 {
232 public:
// Signed offset type. The magnitude is the live-data offset for the Block;
// the sign encodes the parity of the Block's first 1 bit (see raw_offset()).
233 typedef short int blk_ofs_t;
234
// Magnitude of the stored offset, with the parity sign stripped.
235 blk_ofs_t offset() const { return _offset >= 0 ? _offset : -_offset; }
// Raw signed value: negative means the first 1 bit in this Block marks the
// end of an object rather than the start of one.
236 blk_ofs_t raw_offset() const { return _offset; }
237 void set_first_is_start_bit(bool v) { _first_is_start_bit = v; }
238
239 #if 0
240 // The need for this method was anticipated but it is
241 // never actually used. Do not include it for now. If
242 // it is needed, consider the problem of what is passed
243 // as "v". To avoid warning errors the method set_start_bit_offset()
244 // was changed to take a size_t as the parameter and to do the
245 // check for the possible overflow. Doing the cast in these
246 // methods better limits the potential problems because of
247 // the size of the field to this class.
248 void set_raw_offset(blk_ofs_t v) { _offset = v; }
249 #endif
// Record val as the offset and mark the Block's first 1 bit as an object
// start.  Takes size_t and asserts the value fits in blk_ofs_t.
250 void set_start_bit_offset(size_t val) {
// NOTE(review): val is unsigned, so this assert is vacuous.
251 assert(val >= 0, "sanity");
252 _offset = (blk_ofs_t) val;
253 assert(val == (size_t) _offset, "Value is too large");
254 _first_is_start_bit = true;
255 }
// As above, but the offset is stored negated to record that the Block's
// first 1 bit marks the end of an object.
256 void set_end_bit_offset(size_t val) {
// NOTE(review): val is unsigned, so this assert is vacuous.
257 assert(val >= 0, "sanity");
258 _offset = (blk_ofs_t) val;
259 assert(val == (size_t) _offset, "Value is too large");
260 _offset = - _offset;
261 _first_is_start_bit = false;
262 }
// Parity queries; valid only after the Block has been initialized
// (checked via _set_phase in debug builds).
263 bool first_is_start_bit() {
264 assert(_set_phase > 0, "Not initialized");
265 return _first_is_start_bit;
266 }
267 bool first_is_end_bit() {
268 assert(_set_phase > 0, "Not initialized");
269 return !_first_is_start_bit;
270 }
271
272 private:
// Signed live-data offset; sign carries the first-bit parity (see above).
273 blk_ofs_t _offset;
274 // This is temporary until the mark_bitmap is separated into
275 // a start bit array and an end bit array.
276 bool _first_is_start_bit;
277 #ifdef ASSERT
// Debug-only initialization tracking: _set_phase must be > 0 before the
// parity queries above may be used.
278 short _set_phase;
279 static short _cur_phase;
280 public:
281 static void set_cur_phase(short v) { _cur_phase = v; }
282 #endif
283 };
284
285 public: 212 public:
286 ParallelCompactData(); 213 ParallelCompactData();
287 bool initialize(MemRegion covered_region); 214 bool initialize(MemRegion covered_region);
288 215
289 size_t region_count() const { return _region_count; } 216 size_t region_count() const { return _region_count; }
292 inline RegionData* region(size_t region_idx) const; 219 inline RegionData* region(size_t region_idx) const;
293 inline size_t region(const RegionData* const region_ptr) const; 220 inline size_t region(const RegionData* const region_ptr) const;
294 221
295 // Returns true if the given address is contained within the region 222 // Returns true if the given address is contained within the region
296 bool region_contains(size_t region_index, HeapWord* addr); 223 bool region_contains(size_t region_index, HeapWord* addr);
297
298 size_t block_count() const { return _block_count; }
299 inline BlockData* block(size_t n) const;
300
301 // Returns true if the given block is in the given region.
302 static bool region_contains_block(size_t region_index, size_t block_index);
303 224
304 void add_obj(HeapWord* addr, size_t len); 225 void add_obj(HeapWord* addr, size_t len);
305 void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); } 226 void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
306 227
307 // Fill in the regions covering [beg, end) so that no data moves; i.e., the 228 // Fill in the regions covering [beg, end) so that no data moves; i.e., the
332 253
333 inline HeapWord* region_align_down(HeapWord* addr) const; 254 inline HeapWord* region_align_down(HeapWord* addr) const;
334 inline HeapWord* region_align_up(HeapWord* addr) const; 255 inline HeapWord* region_align_up(HeapWord* addr) const;
335 inline bool is_region_aligned(HeapWord* addr) const; 256 inline bool is_region_aligned(HeapWord* addr) const;
336 257
337 // Analogous to region_offset() for blocks.
338 size_t block_offset(const HeapWord* addr) const;
339 size_t addr_to_block_idx(const HeapWord* addr) const;
340 size_t addr_to_block_idx(const oop obj) const {
341 return addr_to_block_idx((HeapWord*) obj);
342 }
343 inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
344 inline HeapWord* block_to_addr(size_t block) const;
345
346 // Return the address one past the end of the partial object. 258 // Return the address one past the end of the partial object.
347 HeapWord* partial_obj_end(size_t region_idx) const; 259 HeapWord* partial_obj_end(size_t region_idx) const;
348 260
349 // Return the new location of the object p after the 261 // Return the new location of the object p after the
350 // the compaction. 262 // the compaction.
351 HeapWord* calc_new_pointer(HeapWord* addr); 263 HeapWord* calc_new_pointer(HeapWord* addr);
352 264
353 // Same as calc_new_pointer() using blocks.
354 HeapWord* block_calc_new_pointer(HeapWord* addr);
355
356 // Same as calc_new_pointer() using regions.
357 HeapWord* region_calc_new_pointer(HeapWord* addr);
358
359 HeapWord* calc_new_pointer(oop p) { 265 HeapWord* calc_new_pointer(oop p) {
360 return calc_new_pointer((HeapWord*) p); 266 return calc_new_pointer((HeapWord*) p);
361 } 267 }
362 268
363 // Return the updated address for the given klass 269 // Return the updated address for the given klass
364 klassOop calc_new_klass(klassOop); 270 klassOop calc_new_klass(klassOop);
365
366 // Given a block returns true if the partial object for the
367 // corresponding region ends in the block. Returns false, otherwise
368 // If there is no partial object, returns false.
369 bool partial_obj_ends_in_block(size_t block_index);
370
371 // Returns the block index for the block
372 static size_t block_idx(BlockData* block);
373 271
374 #ifdef ASSERT 272 #ifdef ASSERT
375 void verify_clear(const PSVirtualSpace* vspace); 273 void verify_clear(const PSVirtualSpace* vspace);
376 void verify_clear(); 274 void verify_clear();
377 #endif // #ifdef ASSERT 275 #endif // #ifdef ASSERT
378 276
379 private: 277 private:
380 bool initialize_block_data(size_t region_size);
381 bool initialize_region_data(size_t region_size); 278 bool initialize_region_data(size_t region_size);
382 PSVirtualSpace* create_vspace(size_t count, size_t element_size); 279 PSVirtualSpace* create_vspace(size_t count, size_t element_size);
383 280
384 private: 281 private:
385 HeapWord* _region_start; 282 HeapWord* _region_start;
388 #endif // #ifdef ASSERT 285 #endif // #ifdef ASSERT
389 286
390 PSVirtualSpace* _region_vspace; 287 PSVirtualSpace* _region_vspace;
391 RegionData* _region_data; 288 RegionData* _region_data;
392 size_t _region_count; 289 size_t _region_count;
393
394 PSVirtualSpace* _block_vspace;
395 BlockData* _block_data;
396 size_t _block_count;
397 }; 290 };
398 291
399 inline uint 292 inline uint
400 ParallelCompactData::RegionData::destination_count_raw() const 293 ParallelCompactData::RegionData::destination_count_raw() const
401 { 294 {
500 assert(region_ptr >= _region_data, "bad arg"); 393 assert(region_ptr >= _region_data, "bad arg");
501 assert(region_ptr <= _region_data + region_count(), "bad arg"); 394 assert(region_ptr <= _region_data + region_count(), "bad arg");
502 return pointer_delta(region_ptr, _region_data, sizeof(RegionData)); 395 return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
503 } 396 }
504 397
// Return a pointer to the BlockData entry with index n; bounds are checked
// only in debug builds (assert).
505 inline ParallelCompactData::BlockData*
506 ParallelCompactData::block(size_t n) const {
507 assert(n < block_count(), "bad arg");
508 return _block_data + n;
509 }
510
511 inline size_t 398 inline size_t
512 ParallelCompactData::region_offset(const HeapWord* addr) const 399 ParallelCompactData::region_offset(const HeapWord* addr) const
513 { 400 {
514 assert(addr >= _region_start, "bad addr"); 401 assert(addr >= _region_start, "bad addr");
515 assert(addr <= _region_end, "bad addr"); 402 assert(addr <= _region_end, "bad addr");
570 457
571 inline bool 458 inline bool
572 ParallelCompactData::is_region_aligned(HeapWord* addr) const 459 ParallelCompactData::is_region_aligned(HeapWord* addr) const
573 { 460 {
574 return region_offset(addr) == 0; 461 return region_offset(addr) == 0;
575 }
576
// Word offset of addr within its Block: the low bits (BlockOffsetMask) of the
// word delta from _region_start.  Analogous to region_offset().
577 inline size_t
578 ParallelCompactData::block_offset(const HeapWord* addr) const
579 {
580 assert(addr >= _region_start, "bad addr");
581 assert(addr <= _region_end, "bad addr");
582 return pointer_delta(addr, _region_start) & BlockOffsetMask;
583 }
584
// Index of the Block covering addr: word delta from _region_start shifted
// down by Log2BlockSize.
585 inline size_t
586 ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
587 {
588 assert(addr >= _region_start, "bad addr");
589 assert(addr <= _region_end, "bad addr");
590 return pointer_delta(addr, _region_start) >> Log2BlockSize;
591 }
592
// Convenience composition: pointer to the BlockData entry covering addr
// (block() applied to addr_to_block_idx()).
593 inline ParallelCompactData::BlockData*
594 ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
595 {
596 return block(addr_to_block_idx(addr));
597 }
598
// Inverse of addr_to_block_idx(): the heap address of the first word of the
// given Block.
599 inline HeapWord*
600 ParallelCompactData::block_to_addr(size_t block) const
601 {
602 assert(block < _block_count, "block out of range");
603 return _region_start + (block << Log2BlockSize);
604 } 462
605 463
606 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the 464 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
607 // do_addr() method. 465 // do_addr() method.
608 // 466 //
685 543
686 inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) { 544 inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
687 assert(_words_remaining >= words, "processed too many words"); 545 assert(_words_remaining >= words, "processed too many words");
688 _words_remaining -= words; 546 _words_remaining -= words;
689 } 547 }
690
691 // Closure for updating the block data during the summary phase.
692 class BitBlockUpdateClosure: public ParMarkBitMapClosure {
693 // ParallelCompactData::BlockData::blk_ofs_t _live_data_left;
694 size_t _live_data_left; // presumably live words seen so far in the region — confirm in the .cpp
695 size_t _cur_block; // index of the Block currently being updated
696 HeapWord* _region_start; // bounds of the region this closure walks
697 HeapWord* _region_end;
698 size_t _region_index;
699
700 public:
701 BitBlockUpdateClosure(ParMarkBitMap* mbm,
702 ParCompactionManager* cm,
703 size_t region_index);
704
// Simple accessors for the closure's current state.
705 size_t cur_block() { return _cur_block; }
706 size_t region_index() { return _region_index; }
707 size_t live_data_left() { return _live_data_left; }
708 // NOTE(review): the original comment here described a "first bit in the
709 // current block is a start bit" query that is not declared in this class —
// presumably stale; the parity query lives on BlockData. Confirm before relying on it.
710 // Returns true if the current block is within the region for the closure.
711 bool region_contains_cur_block();
712
713 // Set the region index and related region values for
714 // a new region.
715 void reset_region(size_t region_index);
716
// ParMarkBitMap::iterate() callback; processes the live object at addr.
717 virtual IterationStatus do_addr(HeapWord* addr, size_t words);
718 };
719 548
720 // The UseParallelOldGC collector is a stop-the-world garbage collector that 549 // The UseParallelOldGC collector is a stop-the-world garbage collector that
721 // does parts of the collection using parallel threads. The collection includes 550 // does parts of the collection using parallel threads. The collection includes
722 // the tenured generation and the young generation. The permanent generation is 551 // the tenured generation and the young generation. The permanent generation is
723 // collected at the same time as the other two generations but the permanent 552 // collected at the same time as the other two generations but the permanent
807 class PSParallelCompact : AllStatic { 636 class PSParallelCompact : AllStatic {
808 public: 637 public:
809 // Convenient access to type names. 638 // Convenient access to type names.
810 typedef ParMarkBitMap::idx_t idx_t; 639 typedef ParMarkBitMap::idx_t idx_t;
811 typedef ParallelCompactData::RegionData RegionData; 640 typedef ParallelCompactData::RegionData RegionData;
812 typedef ParallelCompactData::BlockData BlockData;
813 641
814 typedef enum { 642 typedef enum {
815 perm_space_id, old_space_id, eden_space_id, 643 perm_space_id, old_space_id, eden_space_id,
816 from_space_id, to_space_id, last_space_id 644 from_space_id, to_space_id, last_space_id
817 } SpaceId; 645 } SpaceId;
1011 static void fill_dense_prefix_end(SpaceId id); 839 static void fill_dense_prefix_end(SpaceId id);
1012 840
1013 static void summarize_spaces_quick(); 841 static void summarize_spaces_quick();
1014 static void summarize_space(SpaceId id, bool maximum_compaction); 842 static void summarize_space(SpaceId id, bool maximum_compaction);
1015 static void summary_phase(ParCompactionManager* cm, bool maximum_compaction); 843 static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
1016
1017 static bool block_first_offset(size_t block_index, idx_t* block_offset_ptr);
1018
1019 // Fill in the BlockData
1020 static void summarize_blocks(ParCompactionManager* cm,
1021 SpaceId first_compaction_space_id);
1022 844
1023 // The space that is compacted after space_id. 845 // The space that is compacted after space_id.
1024 static SpaceId next_compaction_space_id(SpaceId space_id); 846 static SpaceId next_compaction_space_id(SpaceId space_id);
1025 847
1026 // Adjust addresses in roots. Does not adjust addresses in heap. 848 // Adjust addresses in roots. Does not adjust addresses in heap.