Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp @ 20804:7848fc12602b
Merge with jdk8u40-b25
author | Gilles Duboscq <gilles.m.duboscq@oracle.com> |
---|---|
date | Tue, 07 Apr 2015 14:58:49 +0200 |
parents | 52b4284cb496 c2844108a708 |
children |
comparison
equal
deleted
inserted
replaced
20184:84105dcdb05b | 20804:7848fc12602b |
---|---|
169 // adds them to "fl", which is required to be an empty free list. | 169 // adds them to "fl", which is required to be an empty free list. |
170 // If the count of "fl" is negative, its absolute value indicates a | 170 // If the count of "fl" is negative, its absolute value indicates a |
171 // number of free chunks that had been previously "borrowed" from global | 171 // number of free chunks that had been previously "borrowed" from global |
172 // list of size "word_sz", and must now be decremented. | 172 // list of size "word_sz", and must now be decremented. |
173 void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl); | 173 void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl); |
174 | |
175 // Used by par_get_chunk_of_blocks() for the chunks from the | |
176 // indexed_free_lists. | |
177 bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl); | |
178 | |
179 // Used by par_get_chunk_of_blocks_dictionary() to get a chunk | |
180 // evenly splittable into "n" "word_sz" chunks. Returns that | |
181 // evenly splittable chunk. May split a larger chunk to get the | |
182 // evenly splittable chunk. | |
183 FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n); | |
184 | |
185 // Used by par_get_chunk_of_blocks() for the chunks from the | |
186 // dictionary. | |
187 void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl); | |
174 | 188 |
175 // Allocation helper functions | 189 // Allocation helper functions |
176 // Allocate using a strategy that takes from the indexed free lists | 190 // Allocate using a strategy that takes from the indexed free lists |
177 // first. This allocation strategy assumes a companion sweeping | 191 // first. This allocation strategy assumes a companion sweeping |
178 // strategy that attempts to keep the needed number of chunks in each | 192 // strategy that attempts to keep the needed number of chunks in each |
335 return MemRegion(bottom(), | 349 return MemRegion(bottom(), |
336 BlockOffsetArrayUseUnallocatedBlock ? | 350 BlockOffsetArrayUseUnallocatedBlock ? |
337 unallocated_block() : end()); | 351 unallocated_block() : end()); |
338 } | 352 } |
339 | 353 |
340 bool is_in(const void* p) const { | |
341 return used_region().contains(p); | |
342 } | |
343 | |
344 virtual bool is_free_block(const HeapWord* p) const; | 354 virtual bool is_free_block(const HeapWord* p) const; |
345 | 355 |
346 // Resizing support | 356 // Resizing support |
347 void set_end(HeapWord* value); // override | 357 void set_end(HeapWord* value); // override |
348 | 358 |
349 // mutual exclusion support | 359 // mutual exclusion support |
350 Mutex* freelistLock() const { return &_freelistLock; } | 360 Mutex* freelistLock() const { return &_freelistLock; } |
351 | 361 |
352 // Iteration support | 362 // Iteration support |
353 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); | |
354 void oop_iterate(ExtendedOopClosure* cl); | 363 void oop_iterate(ExtendedOopClosure* cl); |
355 | 364 |
356 void object_iterate(ObjectClosure* blk); | 365 void object_iterate(ObjectClosure* blk); |
357 // Apply the closure to each object in the space whose references | 366 // Apply the closure to each object in the space whose references |
358 // point to objects in the heap. The usage of CompactibleFreeListSpace | 367 // point to objects in the heap. The usage of CompactibleFreeListSpace |
361 // valid. For example, an object may reference another object | 370 // valid. For example, an object may reference another object |
362 // that has already been swept up (collected). This method uses | 371 // that has already been swept up (collected). This method uses |
363 // obj_is_alive() to determine whether it is safe to iterate over | 372 // obj_is_alive() to determine whether it is safe to iterate over |
364 // an object. | 373 // an object. |
365 void safe_object_iterate(ObjectClosure* blk); | 374 void safe_object_iterate(ObjectClosure* blk); |
375 | |
376 // Iterate over all objects that intersect with mr, calling "cl->do_object" | |
377 // on each. There is an exception to this: if this closure has already | |
377 // been invoked on an object, it may skip such objects in some cases. This is |
378 // most likely to happen in an "upwards" (ascending address) iteration of |
380 // MemRegions. | |
366 void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); | 381 void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); |
367 | 382 |
368 // Requires that "mr" be entirely within the space. | 383 // Requires that "mr" be entirely within the space. |
369 // Apply "cl->do_object" to all objects that intersect with "mr". | 384 // Apply "cl->do_object" to all objects that intersect with "mr". |
370 // If the iteration encounters an unparseable portion of the region, | 385 // If the iteration encounters an unparseable portion of the region, |
371 // terminate the iteration and return the address of the start of the | 386 // terminate the iteration and return the address of the start of the |
372 // subregion that isn't done. Return of "NULL" indicates that the | 387 // subregion that isn't done. Return of "NULL" indicates that the |
373 // iteration completed. | 388 // iteration completed. |
374 virtual HeapWord* | 389 HeapWord* object_iterate_careful_m(MemRegion mr, |
375 object_iterate_careful_m(MemRegion mr, | 390 ObjectClosureCareful* cl); |
376 ObjectClosureCareful* cl); | |
377 virtual HeapWord* | |
378 object_iterate_careful(ObjectClosureCareful* cl); | |
379 | 391 |
380 // Override: provides a DCTO_CL specific to this kind of space. | 392 // Override: provides a DCTO_CL specific to this kind of space. |
381 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, | 393 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, |
382 CardTableModRefBS::PrecisionStyle precision, | 394 CardTableModRefBS::PrecisionStyle precision, |
383 HeapWord* boundary); | 395 HeapWord* boundary); |