Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp @ 196:d1605aabd0a1 jdk7-b30
6719955: Update copyright year
Summary: Update copyright year for files that have been modified in 2008
Reviewed-by: ohair, tbell
author | xdono |
---|---|
date | Wed, 02 Jul 2008 12:55:16 -0700 |
parents | ba764ed4b6f2 |
children | 850fdf70db2b |
rev | line source |
---|---|
0 | 1 /* |
196 | 2 * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
// Forward declarations of the collaborating parallel-compaction types.
class ParallelScavengeHeap;
class PSAdaptiveSizePolicy;
class PSYoungGen;
class PSOldGen;
class PSPermGen;
class ParCompactionManager;
class ParallelTaskTerminator;
class PSParallelCompact;
class GCTaskManager;
class GCTaskQueue;
class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;
39 class SpaceInfo | |
40 { | |
41 public: | |
42 MutableSpace* space() const { return _space; } | |
43 | |
44 // Where the free space will start after the collection. Valid only after the | |
45 // summary phase completes. | |
46 HeapWord* new_top() const { return _new_top; } | |
47 | |
48 // Allows new_top to be set. | |
49 HeapWord** new_top_addr() { return &_new_top; } | |
50 | |
51 // Where the smallest allowable dense prefix ends (used only for perm gen). | |
52 HeapWord* min_dense_prefix() const { return _min_dense_prefix; } | |
53 | |
54 // Where the dense prefix ends, or the compacted region begins. | |
55 HeapWord* dense_prefix() const { return _dense_prefix; } | |
56 | |
57 // The start array for the (generation containing the) space, or NULL if there | |
58 // is no start array. | |
59 ObjectStartArray* start_array() const { return _start_array; } | |
60 | |
61 void set_space(MutableSpace* s) { _space = s; } | |
62 void set_new_top(HeapWord* addr) { _new_top = addr; } | |
63 void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; } | |
64 void set_dense_prefix(HeapWord* addr) { _dense_prefix = addr; } | |
65 void set_start_array(ObjectStartArray* s) { _start_array = s; } | |
66 | |
67 private: | |
68 MutableSpace* _space; | |
69 HeapWord* _new_top; | |
70 HeapWord* _min_dense_prefix; | |
71 HeapWord* _dense_prefix; | |
72 ObjectStartArray* _start_array; | |
73 }; | |
74 | |
75 class ParallelCompactData | |
76 { | |
77 public: | |
78 // Sizes are in HeapWords, unless indicated otherwise. | |
79 static const size_t Log2ChunkSize; | |
80 static const size_t ChunkSize; | |
81 static const size_t ChunkSizeBytes; | |
82 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
83 // Mask for the bits in a size_t to get an offset within a chunk. |
0 | 84 static const size_t ChunkSizeOffsetMask; |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
85 // Mask for the bits in a pointer to get an offset within a chunk. |
0 | 86 static const size_t ChunkAddrOffsetMask; |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
87 // Mask for the bits in a pointer to get the address of the start of a chunk. |
0 | 88 static const size_t ChunkAddrMask; |
89 | |
90 static const size_t Log2BlockSize; | |
91 static const size_t BlockSize; | |
92 static const size_t BlockOffsetMask; | |
93 static const size_t BlockMask; | |
94 | |
95 static const size_t BlocksPerChunk; | |
96 | |
97 class ChunkData | |
98 { | |
99 public: | |
100 // Destination address of the chunk. | |
101 HeapWord* destination() const { return _destination; } | |
102 | |
103 // The first chunk containing data destined for this chunk. | |
104 size_t source_chunk() const { return _source_chunk; } | |
105 | |
106 // The object (if any) starting in this chunk and ending in a different | |
107 // chunk that could not be updated during the main (parallel) compaction | |
108 // phase. This is different from _partial_obj_addr, which is an object that | |
109 // extends onto a source chunk. However, the two uses do not overlap in | |
110 // time, so the same field is used to save space. | |
111 HeapWord* deferred_obj_addr() const { return _partial_obj_addr; } | |
112 | |
113 // The starting address of the partial object extending onto the chunk. | |
114 HeapWord* partial_obj_addr() const { return _partial_obj_addr; } | |
115 | |
116 // Size of the partial object extending onto the chunk (words). | |
117 size_t partial_obj_size() const { return _partial_obj_size; } | |
118 | |
119 // Size of live data that lies within this chunk due to objects that start | |
120 // in this chunk (words). This does not include the partial object | |
121 // extending onto the chunk (if any), or the part of an object that extends | |
122 // onto the next chunk (if any). | |
123 size_t live_obj_size() const { return _dc_and_los & los_mask; } | |
124 | |
125 // Total live data that lies within the chunk (words). | |
126 size_t data_size() const { return partial_obj_size() + live_obj_size(); } | |
127 | |
128 // The destination_count is the number of other chunks to which data from | |
129 // this chunk will be copied. At the end of the summary phase, the valid | |
130 // values of destination_count are | |
131 // | |
132 // 0 - data from the chunk will be compacted completely into itself, or the | |
133 // chunk is empty. The chunk can be claimed and then filled. | |
134 // 1 - data from the chunk will be compacted into 1 other chunk; some | |
135 // data from the chunk may also be compacted into the chunk itself. | |
136 // 2 - data from the chunk will be copied to 2 other chunks. | |
137 // | |
138 // During compaction as chunks are emptied, the destination_count is | |
139 // decremented (atomically) and when it reaches 0, it can be claimed and | |
140 // then filled. | |
141 // | |
142 // A chunk is claimed for processing by atomically changing the | |
143 // destination_count to the claimed value (dc_claimed). After a chunk has | |
144 // been filled, the destination_count should be set to the completed value | |
145 // (dc_completed). | |
146 inline uint destination_count() const; | |
147 inline uint destination_count_raw() const; | |
148 | |
149 // The location of the java heap data that corresponds to this chunk. | |
150 inline HeapWord* data_location() const; | |
151 | |
152 // The highest address referenced by objects in this chunk. | |
153 inline HeapWord* highest_ref() const; | |
154 | |
155 // Whether this chunk is available to be claimed, has been claimed, or has | |
156 // been completed. | |
157 // | |
158 // Minor subtlety: claimed() returns true if the chunk is marked | |
159 // completed(), which is desirable since a chunk must be claimed before it | |
160 // can be completed. | |
161 bool available() const { return _dc_and_los < dc_one; } | |
162 bool claimed() const { return _dc_and_los >= dc_claimed; } | |
163 bool completed() const { return _dc_and_los >= dc_completed; } | |
164 | |
165 // These are not atomic. | |
166 void set_destination(HeapWord* addr) { _destination = addr; } | |
167 void set_source_chunk(size_t chunk) { _source_chunk = chunk; } | |
168 void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; } | |
169 void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; } | |
170 void set_partial_obj_size(size_t words) { | |
171 _partial_obj_size = (chunk_sz_t) words; | |
172 } | |
173 | |
174 inline void set_destination_count(uint count); | |
175 inline void set_live_obj_size(size_t words); | |
176 inline void set_data_location(HeapWord* addr); | |
177 inline void set_completed(); | |
178 inline bool claim_unsafe(); | |
179 | |
180 // These are atomic. | |
181 inline void add_live_obj(size_t words); | |
182 inline void set_highest_ref(HeapWord* addr); | |
183 inline void decrement_destination_count(); | |
184 inline bool claim(); | |
185 | |
186 private: | |
187 // The type used to represent object sizes within a chunk. | |
188 typedef uint chunk_sz_t; | |
189 | |
190 // Constants for manipulating the _dc_and_los field, which holds both the | |
191 // destination count and live obj size. The live obj size lives at the | |
192 // least significant end so no masking is necessary when adding. | |
193 static const chunk_sz_t dc_shift; // Shift amount. | |
194 static const chunk_sz_t dc_mask; // Mask for destination count. | |
195 static const chunk_sz_t dc_one; // 1, shifted appropriately. | |
196 static const chunk_sz_t dc_claimed; // Chunk has been claimed. | |
197 static const chunk_sz_t dc_completed; // Chunk has been completed. | |
198 static const chunk_sz_t los_mask; // Mask for live obj size. | |
199 | |
200 HeapWord* _destination; | |
201 size_t _source_chunk; | |
202 HeapWord* _partial_obj_addr; | |
203 chunk_sz_t _partial_obj_size; | |
204 chunk_sz_t volatile _dc_and_los; | |
205 #ifdef ASSERT | |
206 // These enable optimizations that are only partially implemented. Use | |
207 // debug builds to prevent the code fragments from breaking. | |
208 HeapWord* _data_location; | |
209 HeapWord* _highest_ref; | |
210 #endif // #ifdef ASSERT | |
211 | |
212 #ifdef ASSERT | |
213 public: | |
214 uint _pushed; // 0 until chunk is pushed onto a worker's stack | |
215 private: | |
216 #endif | |
217 }; | |
218 | |
219 // 'Blocks' allow shorter sections of the bitmap to be searched. Each Block | |
220 // holds an offset, which is the amount of live data in the Chunk to the left | |
221 // of the first live object in the Block. This amount of live data will | |
222 // include any object extending into the block. The first block in | |
223 // a chunk does not include any partial object extending into the | |
224 // the chunk. | |
225 // | |
226 // The offset also encodes the | |
227 // 'parity' of the first 1 bit in the Block: a positive offset means the | |
228 // first 1 bit marks the start of an object, a negative offset means the first | |
229 // 1 bit marks the end of an object. | |
230 class BlockData | |
231 { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
232 public: |
0 | 233 typedef short int blk_ofs_t; |
234 | |
235 blk_ofs_t offset() const { return _offset >= 0 ? _offset : -_offset; } | |
236 blk_ofs_t raw_offset() const { return _offset; } | |
237 void set_first_is_start_bit(bool v) { _first_is_start_bit = v; } | |
238 | |
239 #if 0 | |
240 // The need for this method was anticipated but it is | |
241 // never actually used. Do not include it for now. If | |
242 // it is needed, consider the problem of what is passed | |
243 // as "v". To avoid warning errors the method set_start_bit_offset() | |
244 // was changed to take a size_t as the parameter and to do the | |
245 // check for the possible overflow. Doing the cast in these | |
246 // methods better limits the potential problems because of | |
247 // the size of the field to this class. | |
248 void set_raw_offset(blk_ofs_t v) { _offset = v; } | |
249 #endif | |
250 void set_start_bit_offset(size_t val) { | |
251 assert(val >= 0, "sanity"); | |
252 _offset = (blk_ofs_t) val; | |
253 assert(val == (size_t) _offset, "Value is too large"); | |
254 _first_is_start_bit = true; | |
255 } | |
256 void set_end_bit_offset(size_t val) { | |
257 assert(val >= 0, "sanity"); | |
258 _offset = (blk_ofs_t) val; | |
259 assert(val == (size_t) _offset, "Value is too large"); | |
260 _offset = - _offset; | |
261 _first_is_start_bit = false; | |
262 } | |
263 bool first_is_start_bit() { | |
264 assert(_set_phase > 0, "Not initialized"); | |
265 return _first_is_start_bit; | |
266 } | |
267 bool first_is_end_bit() { | |
268 assert(_set_phase > 0, "Not initialized"); | |
269 return !_first_is_start_bit; | |
270 } | |
271 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
272 private: |
0 | 273 blk_ofs_t _offset; |
274 // This is temporary until the mark_bitmap is separated into | |
275 // a start bit array and an end bit array. | |
276 bool _first_is_start_bit; | |
277 #ifdef ASSERT | |
278 short _set_phase; | |
279 static short _cur_phase; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
280 public: |
0 | 281 static void set_cur_phase(short v) { _cur_phase = v; } |
282 #endif | |
283 }; | |
284 | |
285 public: | |
286 ParallelCompactData(); | |
287 bool initialize(MemRegion covered_region); | |
288 | |
289 size_t chunk_count() const { return _chunk_count; } | |
290 | |
291 // Convert chunk indices to/from ChunkData pointers. | |
292 inline ChunkData* chunk(size_t chunk_idx) const; | |
293 inline size_t chunk(const ChunkData* const chunk_ptr) const; | |
294 | |
295 // Returns true if the given address is contained within the chunk | |
296 bool chunk_contains(size_t chunk_index, HeapWord* addr); | |
297 | |
298 size_t block_count() const { return _block_count; } | |
299 inline BlockData* block(size_t n) const; | |
300 | |
301 // Returns true if the given block is in the given chunk. | |
302 static bool chunk_contains_block(size_t chunk_index, size_t block_index); | |
303 | |
304 void add_obj(HeapWord* addr, size_t len); | |
305 void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); } | |
306 | |
307 // Fill in the chunks covering [beg, end) so that no data moves; i.e., the | |
308 // destination of chunk n is simply the start of chunk n. The argument beg | |
309 // must be chunk-aligned; end need not be. | |
310 void summarize_dense_prefix(HeapWord* beg, HeapWord* end); | |
311 | |
312 bool summarize(HeapWord* target_beg, HeapWord* target_end, | |
313 HeapWord* source_beg, HeapWord* source_end, | |
314 HeapWord** target_next, HeapWord** source_next = 0); | |
315 | |
316 void clear(); | |
317 void clear_range(size_t beg_chunk, size_t end_chunk); | |
318 void clear_range(HeapWord* beg, HeapWord* end) { | |
319 clear_range(addr_to_chunk_idx(beg), addr_to_chunk_idx(end)); | |
320 } | |
321 | |
322 // Return the number of words between addr and the start of the chunk | |
323 // containing addr. | |
324 inline size_t chunk_offset(const HeapWord* addr) const; | |
325 | |
326 // Convert addresses to/from a chunk index or chunk pointer. | |
327 inline size_t addr_to_chunk_idx(const HeapWord* addr) const; | |
328 inline ChunkData* addr_to_chunk_ptr(const HeapWord* addr) const; | |
329 inline HeapWord* chunk_to_addr(size_t chunk) const; | |
330 inline HeapWord* chunk_to_addr(size_t chunk, size_t offset) const; | |
331 inline HeapWord* chunk_to_addr(const ChunkData* chunk) const; | |
332 | |
333 inline HeapWord* chunk_align_down(HeapWord* addr) const; | |
334 inline HeapWord* chunk_align_up(HeapWord* addr) const; | |
335 inline bool is_chunk_aligned(HeapWord* addr) const; | |
336 | |
337 // Analogous to chunk_offset() for blocks. | |
338 size_t block_offset(const HeapWord* addr) const; | |
339 size_t addr_to_block_idx(const HeapWord* addr) const; | |
340 size_t addr_to_block_idx(const oop obj) const { | |
341 return addr_to_block_idx((HeapWord*) obj); | |
342 } | |
343 inline BlockData* addr_to_block_ptr(const HeapWord* addr) const; | |
344 inline HeapWord* block_to_addr(size_t block) const; | |
345 | |
346 // Return the address one past the end of the partial object. | |
347 HeapWord* partial_obj_end(size_t chunk_idx) const; | |
348 | |
349 // Return the new location of the object p after the | |
350 // the compaction. | |
351 HeapWord* calc_new_pointer(HeapWord* addr); | |
352 | |
353 // Same as calc_new_pointer() using blocks. | |
354 HeapWord* block_calc_new_pointer(HeapWord* addr); | |
355 | |
356 // Same as calc_new_pointer() using chunks. | |
357 HeapWord* chunk_calc_new_pointer(HeapWord* addr); | |
358 | |
359 HeapWord* calc_new_pointer(oop p) { | |
360 return calc_new_pointer((HeapWord*) p); | |
361 } | |
362 | |
363 // Return the updated address for the given klass | |
364 klassOop calc_new_klass(klassOop); | |
365 | |
366 // Given a block returns true if the partial object for the | |
367 // corresponding chunk ends in the block. Returns false, otherwise | |
368 // If there is no partial object, returns false. | |
369 bool partial_obj_ends_in_block(size_t block_index); | |
370 | |
371 // Returns the block index for the block | |
372 static size_t block_idx(BlockData* block); | |
373 | |
374 #ifdef ASSERT | |
375 void verify_clear(const PSVirtualSpace* vspace); | |
376 void verify_clear(); | |
377 #endif // #ifdef ASSERT | |
378 | |
379 private: | |
380 bool initialize_block_data(size_t region_size); | |
381 bool initialize_chunk_data(size_t region_size); | |
382 PSVirtualSpace* create_vspace(size_t count, size_t element_size); | |
383 | |
384 private: | |
385 HeapWord* _region_start; | |
386 #ifdef ASSERT | |
387 HeapWord* _region_end; | |
388 #endif // #ifdef ASSERT | |
389 | |
390 PSVirtualSpace* _chunk_vspace; | |
391 ChunkData* _chunk_data; | |
392 size_t _chunk_count; | |
393 | |
394 PSVirtualSpace* _block_vspace; | |
395 BlockData* _block_data; | |
396 size_t _block_count; | |
397 }; | |
398 | |
399 inline uint | |
400 ParallelCompactData::ChunkData::destination_count_raw() const | |
401 { | |
402 return _dc_and_los & dc_mask; | |
403 } | |
404 | |
405 inline uint | |
406 ParallelCompactData::ChunkData::destination_count() const | |
407 { | |
408 return destination_count_raw() >> dc_shift; | |
409 } | |
410 | |
411 inline void | |
412 ParallelCompactData::ChunkData::set_destination_count(uint count) | |
413 { | |
414 assert(count <= (dc_completed >> dc_shift), "count too large"); | |
415 const chunk_sz_t live_sz = (chunk_sz_t) live_obj_size(); | |
416 _dc_and_los = (count << dc_shift) | live_sz; | |
417 } | |
418 | |
419 inline void ParallelCompactData::ChunkData::set_live_obj_size(size_t words) | |
420 { | |
421 assert(words <= los_mask, "would overflow"); | |
422 _dc_and_los = destination_count_raw() | (chunk_sz_t)words; | |
423 } | |
424 | |
425 inline void ParallelCompactData::ChunkData::decrement_destination_count() | |
426 { | |
427 assert(_dc_and_los < dc_claimed, "already claimed"); | |
428 assert(_dc_and_los >= dc_one, "count would go negative"); | |
429 Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los); | |
430 } | |
431 | |
432 inline HeapWord* ParallelCompactData::ChunkData::data_location() const | |
433 { | |
434 DEBUG_ONLY(return _data_location;) | |
435 NOT_DEBUG(return NULL;) | |
436 } | |
437 | |
438 inline HeapWord* ParallelCompactData::ChunkData::highest_ref() const | |
439 { | |
440 DEBUG_ONLY(return _highest_ref;) | |
441 NOT_DEBUG(return NULL;) | |
442 } | |
443 | |
444 inline void ParallelCompactData::ChunkData::set_data_location(HeapWord* addr) | |
445 { | |
446 DEBUG_ONLY(_data_location = addr;) | |
447 } | |
448 | |
449 inline void ParallelCompactData::ChunkData::set_completed() | |
450 { | |
451 assert(claimed(), "must be claimed first"); | |
452 _dc_and_los = dc_completed | (chunk_sz_t) live_obj_size(); | |
453 } | |
454 | |
455 // MT-unsafe claiming of a chunk. Should only be used during single threaded | |
456 // execution. | |
457 inline bool ParallelCompactData::ChunkData::claim_unsafe() | |
458 { | |
459 if (available()) { | |
460 _dc_and_los |= dc_claimed; | |
461 return true; | |
462 } | |
463 return false; | |
464 } | |
465 | |
466 inline void ParallelCompactData::ChunkData::add_live_obj(size_t words) | |
467 { | |
468 assert(words <= (size_t)los_mask - live_obj_size(), "overflow"); | |
469 Atomic::add((int) words, (volatile int*) &_dc_and_los); | |
470 } | |
471 | |
472 inline void ParallelCompactData::ChunkData::set_highest_ref(HeapWord* addr) | |
473 { | |
474 #ifdef ASSERT | |
475 HeapWord* tmp = _highest_ref; | |
476 while (addr > tmp) { | |
477 tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp); | |
478 } | |
479 #endif // #ifdef ASSERT | |
480 } | |
481 | |
482 inline bool ParallelCompactData::ChunkData::claim() | |
483 { | |
484 const int los = (int) live_obj_size(); | |
485 const int old = Atomic::cmpxchg(dc_claimed | los, | |
486 (volatile int*) &_dc_and_los, los); | |
487 return old == los; | |
488 } | |
489 | |
490 inline ParallelCompactData::ChunkData* | |
491 ParallelCompactData::chunk(size_t chunk_idx) const | |
492 { | |
493 assert(chunk_idx <= chunk_count(), "bad arg"); | |
494 return _chunk_data + chunk_idx; | |
495 } | |
496 | |
497 inline size_t | |
498 ParallelCompactData::chunk(const ChunkData* const chunk_ptr) const | |
499 { | |
500 assert(chunk_ptr >= _chunk_data, "bad arg"); | |
501 assert(chunk_ptr <= _chunk_data + chunk_count(), "bad arg"); | |
502 return pointer_delta(chunk_ptr, _chunk_data, sizeof(ChunkData)); | |
503 } | |
504 | |
505 inline ParallelCompactData::BlockData* | |
506 ParallelCompactData::block(size_t n) const { | |
507 assert(n < block_count(), "bad arg"); | |
508 return _block_data + n; | |
509 } | |
510 | |
511 inline size_t | |
512 ParallelCompactData::chunk_offset(const HeapWord* addr) const | |
513 { | |
514 assert(addr >= _region_start, "bad addr"); | |
515 assert(addr <= _region_end, "bad addr"); | |
516 return (size_t(addr) & ChunkAddrOffsetMask) >> LogHeapWordSize; | |
517 } | |
518 | |
519 inline size_t | |
520 ParallelCompactData::addr_to_chunk_idx(const HeapWord* addr) const | |
521 { | |
522 assert(addr >= _region_start, "bad addr"); | |
523 assert(addr <= _region_end, "bad addr"); | |
524 return pointer_delta(addr, _region_start) >> Log2ChunkSize; | |
525 } | |
526 | |
527 inline ParallelCompactData::ChunkData* | |
528 ParallelCompactData::addr_to_chunk_ptr(const HeapWord* addr) const | |
529 { | |
530 return chunk(addr_to_chunk_idx(addr)); | |
531 } | |
532 | |
533 inline HeapWord* | |
534 ParallelCompactData::chunk_to_addr(size_t chunk) const | |
535 { | |
536 assert(chunk <= _chunk_count, "chunk out of range"); | |
537 return _region_start + (chunk << Log2ChunkSize); | |
538 } | |
539 | |
540 inline HeapWord* | |
541 ParallelCompactData::chunk_to_addr(const ChunkData* chunk) const | |
542 { | |
543 return chunk_to_addr(pointer_delta(chunk, _chunk_data, sizeof(ChunkData))); | |
544 } | |
545 | |
546 inline HeapWord* | |
547 ParallelCompactData::chunk_to_addr(size_t chunk, size_t offset) const | |
548 { | |
549 assert(chunk <= _chunk_count, "chunk out of range"); | |
550 assert(offset < ChunkSize, "offset too big"); // This may be too strict. | |
551 return chunk_to_addr(chunk) + offset; | |
552 } | |
553 | |
554 inline HeapWord* | |
555 ParallelCompactData::chunk_align_down(HeapWord* addr) const | |
556 { | |
557 assert(addr >= _region_start, "bad addr"); | |
558 assert(addr < _region_end + ChunkSize, "bad addr"); | |
559 return (HeapWord*)(size_t(addr) & ChunkAddrMask); | |
560 } | |
561 | |
562 inline HeapWord* | |
563 ParallelCompactData::chunk_align_up(HeapWord* addr) const | |
564 { | |
565 assert(addr >= _region_start, "bad addr"); | |
566 assert(addr <= _region_end, "bad addr"); | |
567 return chunk_align_down(addr + ChunkSizeOffsetMask); | |
568 } | |
569 | |
570 inline bool | |
571 ParallelCompactData::is_chunk_aligned(HeapWord* addr) const | |
572 { | |
573 return chunk_offset(addr) == 0; | |
574 } | |
575 | |
576 inline size_t | |
577 ParallelCompactData::block_offset(const HeapWord* addr) const | |
578 { | |
579 assert(addr >= _region_start, "bad addr"); | |
580 assert(addr <= _region_end, "bad addr"); | |
581 return pointer_delta(addr, _region_start) & BlockOffsetMask; | |
582 } | |
583 | |
584 inline size_t | |
585 ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const | |
586 { | |
587 assert(addr >= _region_start, "bad addr"); | |
588 assert(addr <= _region_end, "bad addr"); | |
589 return pointer_delta(addr, _region_start) >> Log2BlockSize; | |
590 } | |
591 | |
592 inline ParallelCompactData::BlockData* | |
593 ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const | |
594 { | |
595 return block(addr_to_block_idx(addr)); | |
596 } | |
597 | |
598 inline HeapWord* | |
599 ParallelCompactData::block_to_addr(size_t block) const | |
600 { | |
601 assert(block < _block_count, "block out of range"); | |
602 return _region_start + (block << Log2BlockSize); | |
603 } | |
604 | |
605 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the | |
606 // do_addr() method. | |
607 // | |
608 // The closure is initialized with the number of heap words to process | |
609 // (words_remaining()), and becomes 'full' when it reaches 0. The do_addr() | |
610 // methods in subclasses should update the total as words are processed. Since | |
611 // only one subclass actually uses this mechanism to terminate iteration, the | |
612 // default initial value is > 0. The implementation is here and not in the | |
613 // single subclass that uses it to avoid making is_full() virtual, and thus | |
614 // adding a virtual call per live object. | |
615 | |
616 class ParMarkBitMapClosure: public StackObj { | |
617 public: | |
618 typedef ParMarkBitMap::idx_t idx_t; | |
619 typedef ParMarkBitMap::IterationStatus IterationStatus; | |
620 | |
621 public: | |
622 inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm, | |
623 size_t words = max_uintx); | |
624 | |
625 inline ParCompactionManager* compaction_manager() const; | |
626 inline ParMarkBitMap* bitmap() const; | |
627 inline size_t words_remaining() const; | |
628 inline bool is_full() const; | |
629 inline HeapWord* source() const; | |
630 | |
631 inline void set_source(HeapWord* addr); | |
632 | |
633 virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0; | |
634 | |
635 protected: | |
636 inline void decrement_words_remaining(size_t words); | |
637 | |
638 private: | |
639 ParMarkBitMap* const _bitmap; | |
640 ParCompactionManager* const _compaction_manager; | |
641 DEBUG_ONLY(const size_t _initial_words_remaining;) // Useful in debugger. | |
642 size_t _words_remaining; // Words left to copy. | |
643 | |
644 protected: | |
645 HeapWord* _source; // Next addr that would be read. | |
646 }; | |
647 | |
648 inline | |
649 ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap, | |
650 ParCompactionManager* cm, | |
651 size_t words): | |
652 _bitmap(bitmap), _compaction_manager(cm) | |
653 #ifdef ASSERT | |
654 , _initial_words_remaining(words) | |
655 #endif | |
656 { | |
657 _words_remaining = words; | |
658 _source = NULL; | |
659 } | |
660 | |
661 inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const { | |
662 return _compaction_manager; | |
663 } | |
664 | |
665 inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const { | |
666 return _bitmap; | |
667 } | |
668 | |
669 inline size_t ParMarkBitMapClosure::words_remaining() const { | |
670 return _words_remaining; | |
671 } | |
672 | |
673 inline bool ParMarkBitMapClosure::is_full() const { | |
674 return words_remaining() == 0; | |
675 } | |
676 | |
677 inline HeapWord* ParMarkBitMapClosure::source() const { | |
678 return _source; | |
679 } | |
680 | |
681 inline void ParMarkBitMapClosure::set_source(HeapWord* addr) { | |
682 _source = addr; | |
683 } | |
684 | |
685 inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) { | |
686 assert(_words_remaining >= words, "processed too many words"); | |
687 _words_remaining -= words; | |
688 } | |
689 | |
690 // Closure for updating the block data during the summary phase. | |
691 class BitBlockUpdateClosure: public ParMarkBitMapClosure { | |
692 // ParallelCompactData::BlockData::blk_ofs_t _live_data_left; | |
693 size_t _live_data_left; | |
694 size_t _cur_block; | |
695 HeapWord* _chunk_start; | |
696 HeapWord* _chunk_end; | |
697 size_t _chunk_index; | |
698 | |
699 public: | |
700 BitBlockUpdateClosure(ParMarkBitMap* mbm, | |
701 ParCompactionManager* cm, | |
702 size_t chunk_index); | |
703 | |
704 size_t cur_block() { return _cur_block; } | |
705 size_t chunk_index() { return _chunk_index; } | |
706 size_t live_data_left() { return _live_data_left; } | |
707 // Returns true the first bit in the current block (cur_block) is | |
708 // a start bit. | |
709 // Returns true if the current block is within the chunk for the closure; | |
710 bool chunk_contains_cur_block(); | |
711 | |
712 // Set the chunk index and related chunk values for | |
713 // a new chunk. | |
714 void reset_chunk(size_t chunk_index); | |
715 | |
716 virtual IterationStatus do_addr(HeapWord* addr, size_t words); | |
717 }; | |
718 | |
719 class PSParallelCompact : AllStatic { | |
720 public: | |
721 // Convenient access to type names. | |
722 typedef ParMarkBitMap::idx_t idx_t; | |
723 typedef ParallelCompactData::ChunkData ChunkData; | |
724 typedef ParallelCompactData::BlockData BlockData; | |
725 | |
726 typedef enum { | |
727 perm_space_id, old_space_id, eden_space_id, | |
728 from_space_id, to_space_id, last_space_id | |
729 } SpaceId; | |
730 | |
731 public: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
732 // Inline closure decls |
0 | 733 // |
734 class IsAliveClosure: public BoolObjectClosure { | |
735 public: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
736 virtual void do_object(oop p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
737 virtual bool do_object_b(oop p); |
0 | 738 }; |
739 | |
740 class KeepAliveClosure: public OopClosure { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
741 private: |
0 | 742 ParCompactionManager* _compaction_manager; |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
743 protected: |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
744 template <class T> inline void do_oop_work(T* p); |
0 | 745 public: |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
746 KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
747 virtual void do_oop(oop* p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
748 virtual void do_oop(narrowOop* p); |
0 | 749 }; |
750 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
  // Current unused
  // Root-scanning closure: marks each root's referent and transitively
  // follows its contents via the compaction manager.
  class FollowRootClosure: public OopsInGenClosure {
   private:
    ParCompactionManager* _compaction_manager;   // per-GC-thread work state
   public:
    FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
    // Roots embedded in nmethods (compiled code) are visited as well.
    virtual const bool do_nmethods() const { return true; }
  };
761 | |
  // Void closure that drains the marking stack held by the given compaction
  // manager (see PSParallelCompact::follow_stack below).
  class FollowStackClosure: public VoidClosure {
   private:
    ParCompactionManager* _compaction_manager;   // owner of the stack to drain
   public:
    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_void();
  };
769 | |
  // Closure that updates an oop slot to its post-compaction address.
  // The two static instances below (_adjust_root_pointer_closure and
  // _adjust_pointer_closure) differ only in the _is_root flag.
  class AdjustPointerClosure: public OopsInGenClosure {
   private:
    bool _is_root;   // true when applied to root locations (outside the heap)
   public:
    AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };
778 | |
  // Closure for verifying update of pointers.  Does not
  // have any side effects.
  class VerifyUpdateClosure: public ParMarkBitMapClosure {
    const MutableSpace* _space;          // Is this ever used?

   public:
    VerifyUpdateClosure(ParCompactionManager* cm, const MutableSpace* sp) :
      ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm), _space(sp)
    { }

    // Invoked for each live range [addr, addr + words); verification only.
    virtual IterationStatus do_addr(HeapWord* addr, size_t words);

    const MutableSpace* space() { return _space; }
  };
793 | |
  // Closure for updating objects altered for debug checking.  Walks the mark
  // bitmap and restores each visited object (debug builds only).
  class ResetObjectsClosure: public ParMarkBitMapClosure {
   public:
    ResetObjectsClosure(ParCompactionManager* cm):
      ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm)
    { }

    // Invoked for each live range [addr, addr + words).
    virtual IterationStatus do_addr(HeapWord* addr, size_t words);
  };
803 | |
804 friend class KeepAliveClosure; | |
805 friend class FollowStackClosure; | |
806 friend class AdjustPointerClosure; | |
807 friend class FollowRootClosure; | |
808 friend class instanceKlassKlass; | |
809 friend class RefProcTaskProxy; | |
810 | |
811 private: | |
812 static elapsedTimer _accumulated_time; | |
813 static unsigned int _total_invocations; | |
814 static unsigned int _maximum_compaction_gc_num; | |
815 static jlong _time_of_last_gc; // ms | |
816 static CollectorCounters* _counters; | |
817 static ParMarkBitMap _mark_bitmap; | |
818 static ParallelCompactData _summary_data; | |
819 static IsAliveClosure _is_alive_closure; | |
820 static SpaceInfo _space_info[last_space_id]; | |
821 static bool _print_phases; | |
822 static AdjustPointerClosure _adjust_root_pointer_closure; | |
823 static AdjustPointerClosure _adjust_pointer_closure; | |
824 | |
825 // Reference processing (used in ...follow_contents) | |
826 static ReferenceProcessor* _ref_processor; | |
827 | |
828 // Updated location of intArrayKlassObj. | |
829 static klassOop _updated_int_array_klass_obj; | |
830 | |
831 // Values computed at initialization and used by dead_wood_limiter(). | |
832 static double _dwl_mean; | |
833 static double _dwl_std_dev; | |
834 static double _dwl_first_term; | |
835 static double _dwl_adjustment; | |
836 #ifdef ASSERT | |
837 static bool _dwl_initialized; | |
838 #endif // #ifdef ASSERT | |
839 | |
840 private: | |
841 // Closure accessors | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
842 static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; } |
0 | 843 static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; } |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
844 static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; } |
0 | 845 |
846 static void initialize_space_info(); | |
847 | |
848 // Return true if details about individual phases should be printed. | |
849 static inline bool print_phases(); | |
850 | |
851 // Clear the marking bitmap and summary data that cover the specified space. | |
852 static void clear_data_covering_space(SpaceId id); | |
853 | |
854 static void pre_compact(PreGCValues* pre_gc_values); | |
855 static void post_compact(); | |
856 | |
857 // Mark live objects | |
858 static void marking_phase(ParCompactionManager* cm, | |
859 bool maximum_heap_compaction); | |
860 static void follow_stack(ParCompactionManager* cm); | |
861 static void follow_weak_klass_links(ParCompactionManager* cm); | |
862 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
863 template <class T> static inline void adjust_pointer(T* p, bool is_root); |
0 | 864 static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } |
865 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
866 template <class T> |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
867 static inline void follow_root(ParCompactionManager* cm, T* p); |
0 | 868 |
869 // Compute the dense prefix for the designated space. This is an experimental | |
870 // implementation currently not used in production. | |
871 static HeapWord* compute_dense_prefix_via_density(const SpaceId id, | |
872 bool maximum_compaction); | |
873 | |
874 // Methods used to compute the dense prefix. | |
875 | |
876 // Compute the value of the normal distribution at x = density. The mean and | |
877 // standard deviation are values saved by initialize_dead_wood_limiter(). | |
878 static inline double normal_distribution(double density); | |
879 | |
880 // Initialize the static vars used by dead_wood_limiter(). | |
881 static void initialize_dead_wood_limiter(); | |
882 | |
883 // Return the percentage of space that can be treated as "dead wood" (i.e., | |
884 // not reclaimed). | |
885 static double dead_wood_limiter(double density, size_t min_percent); | |
886 | |
887 // Find the first (left-most) chunk in the range [beg, end) that has at least | |
888 // dead_words of dead space to the left. The argument beg must be the first | |
889 // chunk in the space that is not completely live. | |
890 static ChunkData* dead_wood_limit_chunk(const ChunkData* beg, | |
891 const ChunkData* end, | |
892 size_t dead_words); | |
893 | |
894 // Return a pointer to the first chunk in the range [beg, end) that is not | |
895 // completely full. | |
896 static ChunkData* first_dead_space_chunk(const ChunkData* beg, | |
897 const ChunkData* end); | |
898 | |
899 // Return a value indicating the benefit or 'yield' if the compacted region | |
900 // were to start (or equivalently if the dense prefix were to end) at the | |
901 // candidate chunk. Higher values are better. | |
902 // | |
903 // The value is based on the amount of space reclaimed vs. the costs of (a) | |
904 // updating references in the dense prefix plus (b) copying objects and | |
905 // updating references in the compacted region. | |
906 static inline double reclaimed_ratio(const ChunkData* const candidate, | |
907 HeapWord* const bottom, | |
908 HeapWord* const top, | |
909 HeapWord* const new_top); | |
910 | |
911 // Compute the dense prefix for the designated space. | |
912 static HeapWord* compute_dense_prefix(const SpaceId id, | |
913 bool maximum_compaction); | |
914 | |
915 // Return true if dead space crosses onto the specified Chunk; bit must be the | |
916 // bit index corresponding to the first word of the Chunk. | |
917 static inline bool dead_space_crosses_boundary(const ChunkData* chunk, | |
918 idx_t bit); | |
919 | |
920 // Summary phase utility routine to fill dead space (if any) at the dense | |
921 // prefix boundary. Should only be called if the the dense prefix is | |
922 // non-empty. | |
923 static void fill_dense_prefix_end(SpaceId id); | |
924 | |
925 static void summarize_spaces_quick(); | |
926 static void summarize_space(SpaceId id, bool maximum_compaction); | |
927 static void summary_phase(ParCompactionManager* cm, bool maximum_compaction); | |
928 | |
929 static bool block_first_offset(size_t block_index, idx_t* block_offset_ptr); | |
930 | |
931 // Fill in the BlockData | |
932 static void summarize_blocks(ParCompactionManager* cm, | |
933 SpaceId first_compaction_space_id); | |
934 | |
935 // The space that is compacted after space_id. | |
936 static SpaceId next_compaction_space_id(SpaceId space_id); | |
937 | |
938 // Adjust addresses in roots. Does not adjust addresses in heap. | |
939 static void adjust_roots(); | |
940 | |
941 // Serial code executed in preparation for the compaction phase. | |
942 static void compact_prologue(); | |
943 | |
944 // Move objects to new locations. | |
945 static void compact_perm(ParCompactionManager* cm); | |
946 static void compact(); | |
947 | |
948 // Add available chunks to the stack and draining tasks to the task queue. | |
949 static void enqueue_chunk_draining_tasks(GCTaskQueue* q, | |
950 uint parallel_gc_threads); | |
951 | |
952 // Add dense prefix update tasks to the task queue. | |
953 static void enqueue_dense_prefix_tasks(GCTaskQueue* q, | |
954 uint parallel_gc_threads); | |
955 | |
956 // Add chunk stealing tasks to the task queue. | |
957 static void enqueue_chunk_stealing_tasks( | |
958 GCTaskQueue* q, | |
959 ParallelTaskTerminator* terminator_ptr, | |
960 uint parallel_gc_threads); | |
961 | |
962 // For debugging only - compacts the old gen serially | |
963 static void compact_serial(ParCompactionManager* cm); | |
964 | |
965 // If objects are left in eden after a collection, try to move the boundary | |
966 // and absorb them into the old gen. Returns true if eden was emptied. | |
967 static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy, | |
968 PSYoungGen* young_gen, | |
969 PSOldGen* old_gen); | |
970 | |
971 // Reset time since last full gc | |
972 static void reset_millis_since_last_gc(); | |
973 | |
974 protected: | |
975 #ifdef VALIDATE_MARK_SWEEP | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
976 static GrowableArray<void*>* _root_refs_stack; |
0 | 977 static GrowableArray<oop> * _live_oops; |
978 static GrowableArray<oop> * _live_oops_moved_to; | |
979 static GrowableArray<size_t>* _live_oops_size; | |
980 static size_t _live_oops_index; | |
981 static size_t _live_oops_index_at_perm; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
982 static GrowableArray<void*>* _other_refs_stack; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
983 static GrowableArray<void*>* _adjusted_pointers; |
0 | 984 static bool _pointer_tracking; |
985 static bool _root_tracking; | |
986 | |
987 // The following arrays are saved since the time of the last GC and | |
988 // assist in tracking down problems where someone has done an errant | |
989 // store into the heap, usually to an oop that wasn't properly | |
990 // handleized across a GC. If we crash or otherwise fail before the | |
991 // next GC, we can query these arrays to find out the object we had | |
992 // intended to do the store to (assuming it is still alive) and the | |
993 // offset within that object. Covered under RecordMarkSweepCompaction. | |
994 static GrowableArray<HeapWord*> * _cur_gc_live_oops; | |
995 static GrowableArray<HeapWord*> * _cur_gc_live_oops_moved_to; | |
996 static GrowableArray<size_t>* _cur_gc_live_oops_size; | |
997 static GrowableArray<HeapWord*> * _last_gc_live_oops; | |
998 static GrowableArray<HeapWord*> * _last_gc_live_oops_moved_to; | |
999 static GrowableArray<size_t>* _last_gc_live_oops_size; | |
1000 #endif | |
1001 | |
1002 public: | |
  // Oop closure used during parallel marking: marks the referent and, if this
  // thread won the race to mark it, pushes it on the compaction manager's
  // scanning stack (see mark_and_push below).
  class MarkAndPushClosure: public OopClosure {
   private:
    ParCompactionManager* _compaction_manager;   // per-GC-thread work state
   public:
    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
    // Also visit oops embedded in compiled code.
    virtual const bool do_nmethods() const { return true; }
  };
1012 | |
1013 PSParallelCompact(); | |
1014 | |
1015 // Convenient accessor for Universe::heap(). | |
1016 static ParallelScavengeHeap* gc_heap() { | |
1017 return (ParallelScavengeHeap*)Universe::heap(); | |
1018 } | |
1019 | |
1020 static void invoke(bool maximum_heap_compaction); | |
1021 static void invoke_no_policy(bool maximum_heap_compaction); | |
1022 | |
1023 static void post_initialize(); | |
1024 // Perform initialization for PSParallelCompact that requires | |
1025 // allocations. This should be called during the VM initialization | |
1026 // at a pointer where it would be appropriate to return a JNI_ENOMEM | |
1027 // in the event of a failure. | |
1028 static bool initialize(); | |
1029 | |
1030 // Public accessors | |
1031 static elapsedTimer* accumulated_time() { return &_accumulated_time; } | |
1032 static unsigned int total_invocations() { return _total_invocations; } | |
1033 static CollectorCounters* counters() { return _counters; } | |
1034 | |
1035 // Used to add tasks | |
1036 static GCTaskManager* const gc_task_manager(); | |
1037 static klassOop updated_int_array_klass_obj() { | |
1038 return _updated_int_array_klass_obj; | |
1039 } | |
1040 | |
1041 // Marking support | |
1042 static inline bool mark_obj(oop obj); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1043 // Check mark and maybe push on marking stack |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1044 template <class T> static inline void mark_and_push(ParCompactionManager* cm, |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1045 T* p); |
0 | 1046 |
1047 // Compaction support. | |
1048 // Return true if p is in the range [beg_addr, end_addr). | |
1049 static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr); | |
1050 static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr); | |
1051 | |
1052 // Convenience wrappers for per-space data kept in _space_info. | |
1053 static inline MutableSpace* space(SpaceId space_id); | |
1054 static inline HeapWord* new_top(SpaceId space_id); | |
1055 static inline HeapWord* dense_prefix(SpaceId space_id); | |
1056 static inline ObjectStartArray* start_array(SpaceId space_id); | |
1057 | |
1058 // Return true if the klass should be updated. | |
1059 static inline bool should_update_klass(klassOop k); | |
1060 | |
1061 // Move and update the live objects in the specified space. | |
1062 static void move_and_update(ParCompactionManager* cm, SpaceId space_id); | |
1063 | |
1064 // Process the end of the given chunk range in the dense prefix. | |
1065 // This includes saving any object not updated. | |
1066 static void dense_prefix_chunks_epilogue(ParCompactionManager* cm, | |
1067 size_t chunk_start_index, | |
1068 size_t chunk_end_index, | |
1069 idx_t exiting_object_offset, | |
1070 idx_t chunk_offset_start, | |
1071 idx_t chunk_offset_end); | |
1072 | |
1073 // Update a chunk in the dense prefix. For each live object | |
1074 // in the chunk, update it's interior references. For each | |
1075 // dead object, fill it with deadwood. Dead space at the end | |
1076 // of a chunk range will be filled to the start of the next | |
1077 // live object regardless of the chunk_index_end. None of the | |
1078 // objects in the dense prefix move and dead space is dead | |
1079 // (holds only dead objects that don't need any processing), so | |
1080 // dead space can be filled in any order. | |
1081 static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm, | |
1082 SpaceId space_id, | |
1083 size_t chunk_index_start, | |
1084 size_t chunk_index_end); | |
1085 | |
1086 // Return the address of the count + 1st live word in the range [beg, end). | |
1087 static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count); | |
1088 | |
1089 // Return the address of the word to be copied to dest_addr, which must be | |
1090 // aligned to a chunk boundary. | |
1091 static HeapWord* first_src_addr(HeapWord* const dest_addr, | |
1092 size_t src_chunk_idx); | |
1093 | |
1094 // Determine the next source chunk, set closure.source() to the start of the | |
1095 // new chunk return the chunk index. Parameter end_addr is the address one | |
1096 // beyond the end of source range just processed. If necessary, switch to a | |
1097 // new source space and set src_space_id (in-out parameter) and src_space_top | |
1098 // (out parameter) accordingly. | |
1099 static size_t next_src_chunk(MoveAndUpdateClosure& closure, | |
1100 SpaceId& src_space_id, | |
1101 HeapWord*& src_space_top, | |
1102 HeapWord* end_addr); | |
1103 | |
1104 // Decrement the destination count for each non-empty source chunk in the | |
1105 // range [beg_chunk, chunk(chunk_align_up(end_addr))). | |
1106 static void decrement_destination_counts(ParCompactionManager* cm, | |
1107 size_t beg_chunk, | |
1108 HeapWord* end_addr); | |
1109 | |
1110 // Fill a chunk, copying objects from one or more source chunks. | |
1111 static void fill_chunk(ParCompactionManager* cm, size_t chunk_idx); | |
1112 static void fill_and_update_chunk(ParCompactionManager* cm, size_t chunk) { | |
1113 fill_chunk(cm, chunk); | |
1114 } | |
1115 | |
1116 // Update the deferred objects in the space. | |
1117 static void update_deferred_objects(ParCompactionManager* cm, SpaceId id); | |
1118 | |
1119 // Mark pointer and follow contents. | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1120 template <class T> |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1121 static inline void mark_and_follow(ParCompactionManager* cm, T* p); |
0 | 1122 |
1123 static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; } | |
1124 static ParallelCompactData& summary_data() { return _summary_data; } | |
1125 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1126 static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1127 static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1128 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1129 template <class T> |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1130 static inline void adjust_pointer(T* p, |
0 | 1131 HeapWord* beg_addr, |
1132 HeapWord* end_addr); | |
1133 | |
1134 // Reference Processing | |
1135 static ReferenceProcessor* const ref_processor() { return _ref_processor; } | |
1136 | |
1137 // Return the SpaceId for the given address. | |
1138 static SpaceId space_id(HeapWord* addr); | |
1139 | |
1140 // Time since last full gc (in milliseconds). | |
1141 static jlong millis_since_last_gc(); | |
1142 | |
1143 #ifdef VALIDATE_MARK_SWEEP | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1144 static void track_adjusted_pointer(void* p, bool isroot); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1145 static void check_adjust_pointer(void* p); |
0 | 1146 static void track_interior_pointers(oop obj); |
1147 static void check_interior_pointers(); | |
1148 | |
1149 static void reset_live_oop_tracking(bool at_perm); | |
1150 static void register_live_oop(oop p, size_t size); | |
1151 static void validate_live_oop(oop p, size_t size); | |
1152 static void live_oop_moved_to(HeapWord* q, size_t size, HeapWord* compaction_top); | |
1153 static void compaction_complete(); | |
1154 | |
1155 // Querying operation of RecordMarkSweepCompaction results. | |
1156 // Finds and prints the current base oop and offset for a word | |
1157 // within an oop that was live during the last GC. Helpful for | |
1158 // tracking down heap stomps. | |
1159 static void print_new_location_of_heap_address(HeapWord* q); | |
1160 #endif // #ifdef VALIDATE_MARK_SWEEP | |
1161 | |
1162 // Call backs for class unloading | |
1163 // Update subklass/sibling/implementor links at end of marking. | |
1164 static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k); | |
1165 | |
1166 #ifndef PRODUCT | |
1167 // Debugging support. | |
1168 static const char* space_names[last_space_id]; | |
1169 static void print_chunk_ranges(); | |
1170 static void print_dense_prefix_stats(const char* const algorithm, | |
1171 const SpaceId id, | |
1172 const bool maximum_compaction, | |
1173 HeapWord* const addr); | |
1174 #endif // #ifndef PRODUCT | |
1175 | |
1176 #ifdef ASSERT | |
1177 // Verify that all the chunks have been emptied. | |
1178 static void verify_complete(SpaceId space_id); | |
1179 #endif // #ifdef ASSERT | |
1180 }; | |
1181 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1182 inline bool PSParallelCompact::mark_obj(oop obj) { |
0 | 1183 const int obj_size = obj->size(); |
1184 if (mark_bitmap()->mark_obj(obj, obj_size)) { | |
1185 _summary_data.add_obj(obj, obj_size); | |
1186 return true; | |
1187 } else { | |
1188 return false; | |
1189 } | |
1190 } | |
1191 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1192 template <class T> |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1193 inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1194 assert(!Universe::heap()->is_in_reserved(p), |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1195 "roots shouldn't be things within the heap"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1196 #ifdef VALIDATE_MARK_SWEEP |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1197 if (ValidateMarkSweep) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1198 guarantee(!_root_refs_stack->contains(p), "should only be in here once"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1199 _root_refs_stack->push(p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1200 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1201 #endif |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1202 T heap_oop = oopDesc::load_heap_oop(p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1203 if (!oopDesc::is_null(heap_oop)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1204 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1205 if (mark_bitmap()->is_unmarked(obj)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1206 if (mark_obj(obj)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1207 obj->follow_contents(cm); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1208 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1209 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1210 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1211 follow_stack(cm); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1212 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1213 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1214 template <class T> |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1215 inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm, |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1216 T* p) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1217 T heap_oop = oopDesc::load_heap_oop(p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1218 if (!oopDesc::is_null(heap_oop)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1219 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1220 if (mark_bitmap()->is_unmarked(obj)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1221 if (mark_obj(obj)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1222 obj->follow_contents(cm); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1223 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1224 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1225 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1226 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1227 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1228 template <class T> |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1229 inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1230 T heap_oop = oopDesc::load_heap_oop(p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1231 if (!oopDesc::is_null(heap_oop)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1232 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1233 if (mark_bitmap()->is_unmarked(obj)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1234 if (mark_obj(obj)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1235 // This thread marked the object and owns the subsequent processing of it. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1236 cm->save_for_scanning(obj); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1237 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1238 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1239 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1240 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1241 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1242 template <class T> |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1243 inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1244 T heap_oop = oopDesc::load_heap_oop(p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1245 if (!oopDesc::is_null(heap_oop)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1246 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1247 oop new_obj = (oop)summary_data().calc_new_pointer(obj); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1248 assert(new_obj != NULL || // is forwarding ptr? |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1249 obj->is_shared(), // never forwarded? |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1250 "should be forwarded"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1251 // Just always do the update unconditionally? |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1252 if (new_obj != NULL) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1253 assert(Universe::heap()->is_in_reserved(new_obj), |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1254 "should be in object space"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1255 oopDesc::encode_store_heap_oop_not_null(p, new_obj); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1256 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1257 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1258 VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot)); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1259 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1260 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1261 template <class T> |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1262 inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1263 #ifdef VALIDATE_MARK_SWEEP |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1264 if (ValidateMarkSweep) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1265 if (!Universe::heap()->is_in_reserved(p)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1266 _root_refs_stack->push(p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1267 } else { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1268 _other_refs_stack->push(p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1269 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1270 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1271 #endif |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1272 mark_and_push(_compaction_manager, p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1273 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1274 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1275 inline bool PSParallelCompact::print_phases() { |
0 | 1276 return _print_phases; |
1277 } | |
1278 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1279 inline double PSParallelCompact::normal_distribution(double density) { |
0 | 1280 assert(_dwl_initialized, "uninitialized"); |
1281 const double squared_term = (density - _dwl_mean) / _dwl_std_dev; | |
1282 return _dwl_first_term * exp(-0.5 * squared_term * squared_term); | |
1283 } | |
1284 | |
1285 inline bool | |
1286 PSParallelCompact::dead_space_crosses_boundary(const ChunkData* chunk, | |
1287 idx_t bit) | |
1288 { | |
1289 assert(bit > 0, "cannot call this for the first bit/chunk"); | |
1290 assert(_summary_data.chunk_to_addr(chunk) == _mark_bitmap.bit_to_addr(bit), | |
1291 "sanity check"); | |
1292 | |
1293 // Dead space crosses the boundary if (1) a partial object does not extend | |
1294 // onto the chunk, (2) an object does not start at the beginning of the chunk, | |
1295 // and (3) an object does not end at the end of the prior chunk. | |
1296 return chunk->partial_obj_size() == 0 && | |
1297 !_mark_bitmap.is_obj_beg(bit) && | |
1298 !_mark_bitmap.is_obj_end(bit - 1); | |
1299 } | |
1300 | |
1301 inline bool | |
1302 PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) { | |
1303 return p >= beg_addr && p < end_addr; | |
1304 } | |
1305 | |
1306 inline bool | |
1307 PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) { | |
1308 return is_in((HeapWord*)p, beg_addr, end_addr); | |
1309 } | |
1310 | |
1311 inline MutableSpace* PSParallelCompact::space(SpaceId id) { | |
1312 assert(id < last_space_id, "id out of range"); | |
1313 return _space_info[id].space(); | |
1314 } | |
1315 | |
1316 inline HeapWord* PSParallelCompact::new_top(SpaceId id) { | |
1317 assert(id < last_space_id, "id out of range"); | |
1318 return _space_info[id].new_top(); | |
1319 } | |
1320 | |
1321 inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) { | |
1322 assert(id < last_space_id, "id out of range"); | |
1323 return _space_info[id].dense_prefix(); | |
1324 } | |
1325 | |
1326 inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) { | |
1327 assert(id < last_space_id, "id out of range"); | |
1328 return _space_info[id].start_array(); | |
1329 } | |
1330 | |
1331 inline bool PSParallelCompact::should_update_klass(klassOop k) { | |
1332 return ((HeapWord*) k) >= dense_prefix(perm_space_id); | |
1333 } | |
1334 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1335 template <class T> |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1336 inline void PSParallelCompact::adjust_pointer(T* p, |
0 | 1337 HeapWord* beg_addr, |
1338 HeapWord* end_addr) { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1339 if (is_in((HeapWord*)p, beg_addr, end_addr)) { |
0 | 1340 adjust_pointer(p); |
1341 } | |
1342 } | |
1343 | |
1344 class MoveAndUpdateClosure: public ParMarkBitMapClosure { | |
1345 public: | |
1346 inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm, | |
1347 ObjectStartArray* start_array, | |
1348 HeapWord* destination, size_t words); | |
1349 | |
1350 // Accessors. | |
1351 HeapWord* destination() const { return _destination; } | |
1352 | |
1353 // If the object will fit (size <= words_remaining()), copy it to the current | |
1354 // destination, update the interior oops and the start array and return either | |
1355 // full (if the closure is full) or incomplete. If the object will not fit, | |
1356 // return would_overflow. | |
1357 virtual IterationStatus do_addr(HeapWord* addr, size_t size); | |
1358 | |
1359 // Copy enough words to fill this closure, starting at source(). Interior | |
1360 // oops and the start array are not updated. Return full. | |
1361 IterationStatus copy_until_full(); | |
1362 | |
1363 // Copy enough words to fill this closure or to the end of an object, | |
1364 // whichever is smaller, starting at source(). Interior oops and the start | |
1365 // array are not updated. | |
1366 void copy_partial_obj(); | |
1367 | |
1368 protected: | |
1369 // Update variables to indicate that word_count words were processed. | |
1370 inline void update_state(size_t word_count); | |
1371 | |
1372 protected: | |
1373 ObjectStartArray* const _start_array; | |
1374 HeapWord* _destination; // Next addr to be written. | |
1375 }; | |
1376 | |
1377 inline | |
1378 MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap, | |
1379 ParCompactionManager* cm, | |
1380 ObjectStartArray* start_array, | |
1381 HeapWord* destination, | |
1382 size_t words) : | |
1383 ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array) | |
1384 { | |
1385 _destination = destination; | |
1386 } | |
1387 | |
1388 inline void MoveAndUpdateClosure::update_state(size_t words) | |
1389 { | |
1390 decrement_words_remaining(words); | |
1391 _source += words; | |
1392 _destination += words; | |
1393 } | |
1394 | |
1395 class UpdateOnlyClosure: public ParMarkBitMapClosure { | |
1396 private: | |
1397 const PSParallelCompact::SpaceId _space_id; | |
1398 ObjectStartArray* const _start_array; | |
1399 | |
1400 public: | |
1401 UpdateOnlyClosure(ParMarkBitMap* mbm, | |
1402 ParCompactionManager* cm, | |
1403 PSParallelCompact::SpaceId space_id); | |
1404 | |
1405 // Update the object. | |
1406 virtual IterationStatus do_addr(HeapWord* addr, size_t words); | |
1407 | |
1408 inline void do_addr(HeapWord* addr); | |
1409 }; | |
1410 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1411 inline void UpdateOnlyClosure::do_addr(HeapWord* addr) |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1412 { |
0 | 1413 _start_array->allocate_block(addr); |
1414 oop(addr)->update_contents(compaction_manager()); | |
1415 } | |
1416 | |
1417 class FillClosure: public ParMarkBitMapClosure { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1418 public: |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1419 FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) : |
0 | 1420 ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm), |
1421 _space_id(space_id), | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1422 _start_array(PSParallelCompact::start_array(space_id)) { |
0 | 1423 assert(_space_id == PSParallelCompact::perm_space_id || |
1424 _space_id == PSParallelCompact::old_space_id, | |
1425 "cannot use FillClosure in the young gen"); | |
1426 assert(bitmap() != NULL, "need a bitmap"); | |
1427 assert(_start_array != NULL, "need a start array"); | |
1428 } | |
1429 | |
1430 void fill_region(HeapWord* addr, size_t size) { | |
1431 MemRegion region(addr, size); | |
1432 SharedHeap::fill_region_with_object(region); | |
1433 _start_array->allocate_block(addr); | |
1434 } | |
1435 | |
1436 virtual IterationStatus do_addr(HeapWord* addr, size_t size) { | |
1437 fill_region(addr, size); | |
1438 return ParMarkBitMap::incomplete; | |
1439 } | |
1440 | |
1441 private: | |
1442 const PSParallelCompact::SpaceId _space_id; | |
1443 ObjectStartArray* const _start_array; | |
1444 }; |