comparison: src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp @ 20337:1f1d373cd044

8038423: G1: Decommit memory within heap
Summary: Allow G1 to decommit memory of arbitrary regions within the heap and their associated auxiliary data structures card table, BOT, hot card cache, and mark bitmaps.
Reviewed-by: mgerdin, brutisso, jwilhelm

author:   tschatzl
date:     Thu, 21 Aug 2014 11:47:10 +0200
parents:  6701abbc4441
children: 0fcaab91d485
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp  (20336:6701abbc4441)
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp  (20337:1f1d373cd044)
@@ -23,10 +23,11 @@
  */
 
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/memRegion.hpp"
 #include "runtime/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // The CollectedHeap type requires subtypes to implement a method
@@ -104,10 +105,15 @@
   // Same as above, but does not have any of the possible side effects
   // discussed above.
   inline HeapWord* block_start_const(const void* addr) const;
 };
 
+class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
 // This implementation of "G1BlockOffsetTable" divides the covered region
 // into "N"-word subregions (where "N" = 2^"LogN". An array with an entry
 // for each such subregion indicates how far back one must go to find the
 // start of the chunk that includes the first word of the subregion.
 //
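
The new G1BlockOffsetSharedArrayMappingChangedListener hooks the shared BOT into the region-to-space mapper introduced by this change: on_commit(uint start_idx, size_t num_regions) is presumably invoked when the mapper commits the backing storage for a range of heap regions, so the BOT can initialize the freshly committed part of its offset array. The standalone C++ sketch below only illustrates that observer wiring; the RegionMapper/MappingChangedListener/BotListener names and the zero-fill body are assumptions for illustration, not the HotSpot implementation.

#include <cstddef>
#include <cstring>
#include <vector>
#include <iostream>

// Illustrative stand-in for G1MappingChangedListener (assumption, not HotSpot code).
class MappingChangedListener {
public:
  virtual ~MappingChangedListener() {}
  virtual void on_commit(unsigned start_idx, size_t num_regions) = 0;
};

// Illustrative stand-in for the region-to-space mapper: commits regions of
// backing storage and notifies a registered listener about the committed range.
class RegionMapper {
  MappingChangedListener* _listener;
public:
  RegionMapper() : _listener(NULL) {}
  void set_mapping_changed_listener(MappingChangedListener* l) { _listener = l; }
  void commit_regions(unsigned start_idx, size_t num_regions) {
    // ... the real mapper would commit the backing memory here ...
    if (_listener != NULL) {
      _listener->on_commit(start_idx, num_regions);
    }
  }
};

// Illustrative BOT-like listener: zero-fills the offset entries that belong to
// the regions that were just committed.
class BotListener : public MappingChangedListener {
  std::vector<unsigned char>* _offsets;
  size_t _entries_per_region;
public:
  BotListener(std::vector<unsigned char>* offsets, size_t entries_per_region)
    : _offsets(offsets), _entries_per_region(entries_per_region) {}
  virtual void on_commit(unsigned start_idx, size_t num_regions) {
    size_t first = (size_t)start_idx * _entries_per_region;
    size_t count = num_regions * _entries_per_region;
    memset(&(*_offsets)[first], 0, count);
  }
};

int main() {
  std::vector<unsigned char> offsets(1024, 0xff);
  BotListener listener(&offsets, 128);   // 128 BOT entries per region (assumed)
  RegionMapper mapper;
  mapper.set_mapping_changed_listener(&listener);
  mapper.commit_regions(2, 3);           // commit regions 2..4
  std::cout << (int)offsets[2 * 128] << std::endl;  // prints 0
  return 0;
}
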
@@ -123,94 +129,57 @@
   friend class G1BlockOffsetArray;
   friend class G1BlockOffsetArrayContigSpace;
   friend class VMStructs;
 
 private:
+  G1BlockOffsetSharedArrayMappingChangedListener _listener;
   // The reserved region covered by the shared array.
   MemRegion _reserved;
 
   // End of the current committed region.
   HeapWord* _end;
 
   // Array for keeping offsets for retrieving object start fast given an
   // address.
-  VirtualSpace _vs;
   u_char* _offset_array;  // byte array keeping backwards offsets
-
-  void check_index(size_t index, const char* msg) const {
-    assert(index < _vs.committed_size(),
-           err_msg("%s - "
-                   "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
-                   msg, index, _vs.committed_size()));
-  }
 
   void check_offset(size_t offset, const char* msg) const {
     assert(offset <= N_words,
            err_msg("%s - "
                    "offset: " SIZE_FORMAT ", N_words: " UINT32_FORMAT,
                    msg, offset, N_words));
   }
 
   // Bounds checking accessors:
   // For performance these have to devolve to array accesses in product builds.
-  u_char offset_array(size_t index) const {
-    check_index(index, "index out of range");
-    return _offset_array[index];
-  }
+  inline u_char offset_array(size_t index) const;
 
   void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
 
-  void set_offset_array(size_t index, u_char offset) {
-    check_index(index, "index out of range");
-    check_offset(offset, "offset too large");
+  void set_offset_array_raw(size_t index, u_char offset) {
     _offset_array[index] = offset;
   }
 
-  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    _offset_array[index] = (u_char) pointer_delta(high, low);
-  }
-
-  void set_offset_array(size_t left, size_t right, u_char offset) {
-    check_index(right, "right index out of range");
-    assert(left <= right, "indexes out of order");
-    size_t num_cards = right - left + 1;
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[left], offset, num_cards);
-    } else {
-      size_t i = left;
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        _offset_array[i] = offset;
-      }
-    }
-  }
-
-  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
-  }
+  inline void set_offset_array(size_t index, u_char offset);
+
+  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
+
+  inline void set_offset_array(size_t left, size_t right, u_char offset);
+
+  inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
 
   bool is_card_boundary(HeapWord* p) const;
+
+public:
 
   // Return the number of slots needed for an offset array
   // that covers mem_region_words words.
-  // We always add an extra slot because if an object
-  // ends on a card boundary we put a 0 in the next
-  // offset array slot, so we want that slot always
-  // to be reserved.
+  static size_t compute_size(size_t mem_region_words) {
+    size_t number_of_slots = (mem_region_words / N_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
+  }
 
-  size_t compute_size(size_t mem_region_words) {
-    size_t number_of_slots = (mem_region_words / N_words) + 1;
-    return ReservedSpace::page_align_size_up(number_of_slots);
-  }
-
-public:
   enum SomePublicConstants {
     LogN = 9,
     LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
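
With the constants kept above and an 8-byte HeapWord (LogHeapWordSize = 3 on 64-bit builds), each BOT entry covers N_bytes = 512 bytes, i.e. N_words = 64 heap words, so the new compute_size() asks for one offset byte per 64 heap words, rounded up to the allocation granularity; the extra +1 slot and the page alignment of the old version are gone. A minimal arithmetic sketch, assuming a 4 KB granularity and a local align_size_up helper in place of ReservedSpace::allocation_align_size_up:

#include <cstddef>
#include <cstdio>

const int LogHeapWordSize = 3;                   // assumes 8-byte HeapWords (64-bit)
const int LogN       = 9;                        // same constants as the header above
const int LogN_words = LogN - LogHeapWordSize;
const size_t N_words = (size_t)1 << LogN_words;  // 64 words per 512-byte card

// Stand-in for ReservedSpace::allocation_align_size_up (assumed 4 KB granularity).
static size_t align_size_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

// Same shape as the new compute_size(): one offset byte per N_words heap words.
static size_t compute_size(size_t mem_region_words) {
  size_t number_of_slots = mem_region_words / N_words;
  return align_size_up(number_of_slots, 4096);
}

int main() {
  size_t heap_bytes = 32u * 1024 * 1024;              // a 32 MB covered region, say
  size_t heap_words = heap_bytes >> LogHeapWordSize;  // 4 M heap words
  printf("BOT size: %zu bytes\n", compute_size(heap_words)); // 65536 bytes
  return 0;
}

For a 32 MB covered region this works out to a 64 KB offset array, which is what the example prints.
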
220 // "base + init_word_size". In the future, the table may be expanded 189 // "base + init_word_size". In the future, the table may be expanded
221 // (see "resize" below) up to the size of "_reserved" (which must be at 190 // (see "resize" below) up to the size of "_reserved" (which must be at
222 // least "init_word_size".) The contents of the initial table are 191 // least "init_word_size".) The contents of the initial table are
223 // undefined; it is the responsibility of the constituent 192 // undefined; it is the responsibility of the constituent
224 // G1BlockOffsetTable(s) to initialize cards. 193 // G1BlockOffsetTable(s) to initialize cards.
225 G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size); 194 G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
226
227 // Notes a change in the committed size of the region covered by the
228 // table. The "new_word_size" may not be larger than the size of the
229 // reserved region this table covers.
230 void resize(size_t new_word_size);
231 195
232 void set_bottom(HeapWord* new_bottom); 196 void set_bottom(HeapWord* new_bottom);
233 197
234 // Return the appropriate index into "_offset_array" for "p". 198 // Return the appropriate index into "_offset_array" for "p".
235 inline size_t index_for(const void* p) const; 199 inline size_t index_for(const void* p) const;
200 inline size_t index_for_raw(const void* p) const;
236 201
237 // Return the address indicating the start of the region corresponding to 202 // Return the address indicating the start of the region corresponding to
238 // "index" in "_offset_array". 203 // "index" in "_offset_array".
239 inline HeapWord* address_for_index(size_t index) const; 204 inline HeapWord* address_for_index(size_t index) const;
205 // Variant of address_for_index that does not check the index for validity.
206 inline HeapWord* address_for_index_raw(size_t index) const {
207 return _reserved.start() + (index << LogN_words);
208 }
240 }; 209 };
241 210
242 // And here is the G1BlockOffsetTable subtype that uses the array. 211 // And here is the G1BlockOffsetTable subtype that uses the array.
243 212
244 class G1BlockOffsetArray: public G1BlockOffsetTable { 213 class G1BlockOffsetArray: public G1BlockOffsetTable {
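
address_for_index_raw() above recovers the start of the subregion covered by a BOT slot as _reserved.start() + (index << LogN_words); by symmetry, index_for_raw() presumably performs the mirror shift on the distance from _reserved.start() (its body lives in the .inline.hpp and is not shown in this hunk). A small round-trip sketch with plain word-sized integers standing in for HeapWord* and MemRegion; the index_for_raw body here is an assumption based on that symmetry:

#include <cassert>
#include <cstddef>
#include <cstdint>

const int LogN_words = 6;   // 64 heap words per BOT slot, as in the header (64-bit)

struct ToyBot {
  uintptr_t reserved_start;  // start of the covered region, measured in heap words

  // Mirrors address_for_index_raw(): start of the subregion covered by "index".
  uintptr_t address_for_index_raw(size_t index) const {
    return reserved_start + (index << LogN_words);
  }
  // Assumed inverse: which slot covers the heap word at "addr".
  size_t index_for_raw(uintptr_t addr) const {
    return (addr - reserved_start) >> LogN_words;
  }
};

int main() {
  ToyBot bot = { 0x100000 };            // arbitrary covered-region start (in words)
  for (size_t i = 0; i < 1000; i++) {
    uintptr_t a = bot.address_for_index_raw(i);
    assert(bot.index_for_raw(a) == i);  // the two mappings are inverses
    assert(bot.index_for_raw(a + (1u << LogN_words) - 1) == i);  // whole slot maps back
  }
  return 0;
}
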
@@ -474,22 +443,28 @@
   void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
     alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
                       blk_start, blk_end);
   }
 
+  // Variant of zero_bottom_entry that does not check for availability of the
+  // memory first.
+  void zero_bottom_entry_raw();
+  // Variant of initialize_threshold that does not check for availability of the
+  // memory first.
+  HeapWord* initialize_threshold_raw();
   // Zero out the entry for _bottom (offset will be zero).
   void zero_bottom_entry();
 public:
   G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
 
   // Initialize the threshold to reflect the first boundary after the
   // bottom of the covered region.
   HeapWord* initialize_threshold();
 
   void reset_bot() {
-    zero_bottom_entry();
-    initialize_threshold();
+    zero_bottom_entry_raw();
+    initialize_threshold_raw();
   }
 
   // Return the next threshold, the point at which the table should be
   // updated.
   HeapWord* threshold() const { return _next_offset_threshold; }
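
reset_bot() now uses the *_raw variants, which per their comments skip the availability check on the backing memory, so a region's BOT can be reset while the region is being set up and its storage may only just have been committed. A toy sketch of that checked-vs-raw split, with an explicit availability flag standing in for the real assertions (which live in the G1 implementation files, not in this header):

#include <cassert>
#include <cstddef>

// Toy table showing the pattern: the public, checked entry points assert that
// the backing storage is usable and then delegate to *_raw workers; callers
// that run before/while storage is being (re)committed use the raw forms.
class ToyContigSpaceBot {
  bool   _storage_available;
  size_t _bottom_entry;
  size_t _threshold;

  void zero_bottom_entry_raw()    { _bottom_entry = 0; }
  void initialize_threshold_raw() { _threshold = 1; }  // first boundary after bottom

public:
  ToyContigSpaceBot() : _storage_available(false), _bottom_entry(0), _threshold(0) {}

  void set_storage_available(bool v) { _storage_available = v; }

  // Checked variants: only legal once the storage backing the table is committed.
  void zero_bottom_entry()    { assert(_storage_available); zero_bottom_entry_raw(); }
  void initialize_threshold() { assert(_storage_available); initialize_threshold_raw(); }

  // Mirrors the change above: reset_bot() calls the raw forms so it can run
  // as part of setting a region up, before the checked invariants hold.
  void reset_bot() {
    zero_bottom_entry_raw();
    initialize_threshold_raw();
  }
};

int main() {
  ToyContigSpaceBot bot;
  bot.reset_bot();                 // fine even though storage is not marked available
  bot.set_storage_available(true);
  bot.zero_bottom_entry();         // checked path now passes its assertion
  return 0;
}
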