Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp @ 14909:4ca6dc0799b6
Backout jdk9 merge
author | Gilles Duboscq <duboscq@ssw.jku.at> |
---|---|
date | Tue, 01 Apr 2014 13:57:07 +0200 |
parents | 63a4eb8bcd23 |
children | 89152779163c |
comparison
equal
deleted
inserted
replaced
14908:8db6e76cb658 | 14909:4ca6dc0799b6 |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
56 _allocation_size_limit = allocation_size_limit; | 56 _allocation_size_limit = allocation_size_limit; |
57 } | 57 } |
58 HeapWord* _ptr; | 58 HeapWord* _ptr; |
59 size_t _word_size; | 59 size_t _word_size; |
60 size_t _refillSize; | 60 size_t _refillSize; |
61 size_t _allocation_size_limit; // Largest size that will be allocated | 61 size_t _allocation_size_limit; // largest size that will be allocated |
62 | 62 |
63 void print_on(outputStream* st) const; | 63 void print_on(outputStream* st) const; |
64 }; | 64 }; |
65 | 65 |
66 // Concrete subclass of CompactibleSpace that implements | 66 // Concrete subclass of CompactibleSpace that implements |
114 FreeBlockBestFitFirst | 114 FreeBlockBestFitFirst |
115 }; | 115 }; |
116 | 116 |
117 PromotionInfo _promoInfo; | 117 PromotionInfo _promoInfo; |
118 | 118 |
119 // Helps to impose a global total order on freelistLock ranks; | 119 // helps to impose a global total order on freelistLock ranks; |
120 // assumes that CFLSpace's are allocated in global total order | 120 // assumes that CFLSpace's are allocated in global total order |
121 static int _lockRank; | 121 static int _lockRank; |
122 | 122 |
123 // A lock protecting the free lists and free blocks; | 123 // a lock protecting the free lists and free blocks; |
124 // mutable because of ubiquity of locking even for otherwise const methods | 124 // mutable because of ubiquity of locking even for otherwise const methods |
125 mutable Mutex _freelistLock; | 125 mutable Mutex _freelistLock; |
126 // Locking verifier convenience function | 126 // locking verifier convenience function |
127 void assert_locked() const PRODUCT_RETURN; | 127 void assert_locked() const PRODUCT_RETURN; |
128 void assert_locked(const Mutex* lock) const PRODUCT_RETURN; | 128 void assert_locked(const Mutex* lock) const PRODUCT_RETURN; |
129 | 129 |
130 // Linear allocation blocks | 130 // Linear allocation blocks |
131 LinearAllocBlock _smallLinearAllocBlock; | 131 LinearAllocBlock _smallLinearAllocBlock; |
132 | 132 |
133 FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice; | 133 FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice; |
134 AFLBinaryTreeDictionary* _dictionary; // Pointer to dictionary for large size blocks | 134 AFLBinaryTreeDictionary* _dictionary; // ptr to dictionary for large size blocks |
135 | 135 |
136 // Indexed array for small size blocks | |
137 AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize]; | 136 AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize]; |
138 | 137 // indexed array for small size blocks |
139 // Allocation strategy | 138 // allocation stategy |
140 bool _fitStrategy; // Use best fit strategy | 139 bool _fitStrategy; // Use best fit strategy. |
141 bool _adaptive_freelists; // Use adaptive freelists | 140 bool _adaptive_freelists; // Use adaptive freelists |
142 | 141 |
143 // This is an address close to the largest free chunk in the heap. | 142 // This is an address close to the largest free chunk in the heap. |
144 // It is currently assumed to be at the end of the heap. Free | 143 // It is currently assumed to be at the end of the heap. Free |
145 // chunks with addresses greater than nearLargestChunk are coalesced | 144 // chunks with addresses greater than nearLargestChunk are coalesced |
156 // Initialization helpers. | 155 // Initialization helpers. |
157 void initializeIndexedFreeListArray(); | 156 void initializeIndexedFreeListArray(); |
158 | 157 |
159 // Extra stuff to manage promotion parallelism. | 158 // Extra stuff to manage promotion parallelism. |
160 | 159 |
161 // A lock protecting the dictionary during par promotion allocation. | 160 // a lock protecting the dictionary during par promotion allocation. |
162 mutable Mutex _parDictionaryAllocLock; | 161 mutable Mutex _parDictionaryAllocLock; |
163 Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; } | 162 Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; } |
164 | 163 |
165 // Locks protecting the exact lists during par promotion allocation. | 164 // Locks protecting the exact lists during par promotion allocation. |
166 Mutex* _indexedFreeListParLocks[IndexSetSize]; | 165 Mutex* _indexedFreeListParLocks[IndexSetSize]; |
274 void freed(HeapWord* start, size_t size) { | 273 void freed(HeapWord* start, size_t size) { |
275 _bt.freed(start, size); | 274 _bt.freed(start, size); |
276 } | 275 } |
277 | 276 |
278 protected: | 277 protected: |
279 // Reset the indexed free list to its initial empty condition. | 278 // reset the indexed free list to its initial empty condition. |
280 void resetIndexedFreeListArray(); | 279 void resetIndexedFreeListArray(); |
281 // Reset to an initial state with a single free block described | 280 // reset to an initial state with a single free block described |
282 // by the MemRegion parameter. | 281 // by the MemRegion parameter. |
283 void reset(MemRegion mr); | 282 void reset(MemRegion mr); |
284 // Return the total number of words in the indexed free lists. | 283 // Return the total number of words in the indexed free lists. |
285 size_t totalSizeInIndexedFreeLists() const; | 284 size_t totalSizeInIndexedFreeLists() const; |
286 | 285 |
287 public: | 286 public: |
288 // Constructor | 287 // Constructor... |
289 CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr, | 288 CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr, |
290 bool use_adaptive_freelists, | 289 bool use_adaptive_freelists, |
291 FreeBlockDictionary<FreeChunk>::DictionaryChoice); | 290 FreeBlockDictionary<FreeChunk>::DictionaryChoice); |
292 // Accessors | 291 // accessors |
293 bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; } | 292 bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; } |
294 FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; } | 293 FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; } |
295 HeapWord* nearLargestChunk() const { return _nearLargestChunk; } | 294 HeapWord* nearLargestChunk() const { return _nearLargestChunk; } |
296 void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; } | 295 void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; } |
297 | 296 |
298 // Set CMS global values. | 297 // Set CMS global values |
299 static void set_cms_values(); | 298 static void set_cms_values(); |
300 | 299 |
301 // Return the free chunk at the end of the space. If no such | 300 // Return the free chunk at the end of the space. If no such |
302 // chunk exists, return NULL. | 301 // chunk exists, return NULL. |
303 FreeChunk* find_chunk_at_end(); | 302 FreeChunk* find_chunk_at_end(); |
304 | 303 |
305 bool adaptive_freelists() const { return _adaptive_freelists; } | 304 bool adaptive_freelists() const { return _adaptive_freelists; } |
306 | 305 |
307 void set_collector(CMSCollector* collector) { _collector = collector; } | 306 void set_collector(CMSCollector* collector) { _collector = collector; } |
308 | 307 |
309 // Support for parallelization of rescan and marking. | 308 // Support for parallelization of rescan and marking |
310 const size_t rescan_task_size() const { return _rescan_task_size; } | 309 const size_t rescan_task_size() const { return _rescan_task_size; } |
311 const size_t marking_task_size() const { return _marking_task_size; } | 310 const size_t marking_task_size() const { return _marking_task_size; } |
312 SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; } | 311 SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; } |
313 void initialize_sequential_subtasks_for_rescan(int n_threads); | 312 void initialize_sequential_subtasks_for_rescan(int n_threads); |
314 void initialize_sequential_subtasks_for_marking(int n_threads, | 313 void initialize_sequential_subtasks_for_marking(int n_threads, |
345 virtual bool is_free_block(const HeapWord* p) const; | 344 virtual bool is_free_block(const HeapWord* p) const; |
346 | 345 |
347 // Resizing support | 346 // Resizing support |
348 void set_end(HeapWord* value); // override | 347 void set_end(HeapWord* value); // override |
349 | 348 |
350 // Mutual exclusion support | 349 // mutual exclusion support |
351 Mutex* freelistLock() const { return &_freelistLock; } | 350 Mutex* freelistLock() const { return &_freelistLock; } |
352 | 351 |
353 // Iteration support | 352 // Iteration support |
354 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); | 353 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); |
355 void oop_iterate(ExtendedOopClosure* cl); | 354 void oop_iterate(ExtendedOopClosure* cl); |
369 // Requires that "mr" be entirely within the space. | 368 // Requires that "mr" be entirely within the space. |
370 // Apply "cl->do_object" to all objects that intersect with "mr". | 369 // Apply "cl->do_object" to all objects that intersect with "mr". |
371 // If the iteration encounters an unparseable portion of the region, | 370 // If the iteration encounters an unparseable portion of the region, |
372 // terminate the iteration and return the address of the start of the | 371 // terminate the iteration and return the address of the start of the |
373 // subregion that isn't done. Return of "NULL" indicates that the | 372 // subregion that isn't done. Return of "NULL" indicates that the |
374 // iteration completed. | 373 // interation completed. |
375 virtual HeapWord* | 374 virtual HeapWord* |
376 object_iterate_careful_m(MemRegion mr, | 375 object_iterate_careful_m(MemRegion mr, |
377 ObjectClosureCareful* cl); | 376 ObjectClosureCareful* cl); |
378 virtual HeapWord* | 377 virtual HeapWord* |
379 object_iterate_careful(ObjectClosureCareful* cl); | 378 object_iterate_careful(ObjectClosureCareful* cl); |
392 bool block_is_obj(const HeapWord* p) const; | 391 bool block_is_obj(const HeapWord* p) const; |
393 bool obj_is_alive(const HeapWord* p) const; | 392 bool obj_is_alive(const HeapWord* p) const; |
394 size_t block_size_nopar(const HeapWord* p) const; | 393 size_t block_size_nopar(const HeapWord* p) const; |
395 bool block_is_obj_nopar(const HeapWord* p) const; | 394 bool block_is_obj_nopar(const HeapWord* p) const; |
396 | 395 |
397 // Iteration support for promotion | 396 // iteration support for promotion |
398 void save_marks(); | 397 void save_marks(); |
399 bool no_allocs_since_save_marks(); | 398 bool no_allocs_since_save_marks(); |
400 | 399 |
401 // Iteration support for sweeping | 400 // iteration support for sweeping |
402 void save_sweep_limit() { | 401 void save_sweep_limit() { |
403 _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ? | 402 _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ? |
404 unallocated_block() : end(); | 403 unallocated_block() : end(); |
405 if (CMSTraceSweeper) { | 404 if (CMSTraceSweeper) { |
406 gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT | 405 gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT |
456 // heap when promoting an obj of size obj_size. | 455 // heap when promoting an obj of size obj_size. |
457 size_t expansionSpaceRequired(size_t obj_size) const; | 456 size_t expansionSpaceRequired(size_t obj_size) const; |
458 | 457 |
459 FreeChunk* allocateScratch(size_t size); | 458 FreeChunk* allocateScratch(size_t size); |
460 | 459 |
461 // Returns true if either the small or large linear allocation buffer is empty. | 460 // returns true if either the small or large linear allocation buffer is empty. |
462 bool linearAllocationWouldFail() const; | 461 bool linearAllocationWouldFail() const; |
463 | 462 |
464 // Adjust the chunk for the minimum size. This version is called in | 463 // Adjust the chunk for the minimum size. This version is called in |
465 // most cases in CompactibleFreeListSpace methods. | 464 // most cases in CompactibleFreeListSpace methods. |
466 inline static size_t adjustObjectSize(size_t size) { | 465 inline static size_t adjustObjectSize(size_t size) { |
476 virtual size_t minimum_free_block_size() const { return MinChunkSize; } | 475 virtual size_t minimum_free_block_size() const { return MinChunkSize; } |
477 void removeFreeChunkFromFreeLists(FreeChunk* chunk); | 476 void removeFreeChunkFromFreeLists(FreeChunk* chunk); |
478 void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size, | 477 void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size, |
479 bool coalesced); | 478 bool coalesced); |
480 | 479 |
481 // Support for decisions regarding concurrent collection policy. | 480 // Support for decisions regarding concurrent collection policy |
482 bool should_concurrent_collect() const; | 481 bool should_concurrent_collect() const; |
483 | 482 |
484 // Support for compaction. | 483 // Support for compaction |
485 void prepare_for_compaction(CompactPoint* cp); | 484 void prepare_for_compaction(CompactPoint* cp); |
486 void adjust_pointers(); | 485 void adjust_pointers(); |
487 void compact(); | 486 void compact(); |
488 // Reset the space to reflect the fact that a compaction of the | 487 // reset the space to reflect the fact that a compaction of the |
489 // space has been done. | 488 // space has been done. |
490 virtual void reset_after_compaction(); | 489 virtual void reset_after_compaction(); |
491 | 490 |
492 // Debugging support. | 491 // Debugging support |
493 void print() const; | 492 void print() const; |
494 void print_on(outputStream* st) const; | 493 void print_on(outputStream* st) const; |
495 void prepare_for_verify(); | 494 void prepare_for_verify(); |
496 void verify() const; | 495 void verify() const; |
497 void verifyFreeLists() const PRODUCT_RETURN; | 496 void verifyFreeLists() const PRODUCT_RETURN; |
499 void verifyIndexedFreeList(size_t size) const; | 498 void verifyIndexedFreeList(size_t size) const; |
500 // Verify that the given chunk is in the free lists: | 499 // Verify that the given chunk is in the free lists: |
501 // i.e. either the binary tree dictionary, the indexed free lists | 500 // i.e. either the binary tree dictionary, the indexed free lists |
502 // or the linear allocation block. | 501 // or the linear allocation block. |
503 bool verify_chunk_in_free_list(FreeChunk* fc) const; | 502 bool verify_chunk_in_free_list(FreeChunk* fc) const; |
504 // Verify that the given chunk is the linear allocation block. | 503 // Verify that the given chunk is the linear allocation block |
505 bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const; | 504 bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const; |
506 // Do some basic checks on the free lists. | 505 // Do some basic checks on the free lists. |
507 void check_free_list_consistency() const PRODUCT_RETURN; | 506 void check_free_list_consistency() const PRODUCT_RETURN; |
508 | 507 |
509 // Printing support | 508 // Printing support |
515 NOT_PRODUCT ( | 514 NOT_PRODUCT ( |
516 void initializeIndexedFreeListArrayReturnedBytes(); | 515 void initializeIndexedFreeListArrayReturnedBytes(); |
517 size_t sumIndexedFreeListArrayReturnedBytes(); | 516 size_t sumIndexedFreeListArrayReturnedBytes(); |
518 // Return the total number of chunks in the indexed free lists. | 517 // Return the total number of chunks in the indexed free lists. |
519 size_t totalCountInIndexedFreeLists() const; | 518 size_t totalCountInIndexedFreeLists() const; |
520 // Return the total number of chunks in the space. | 519 // Return the total numberof chunks in the space. |
521 size_t totalCount(); | 520 size_t totalCount(); |
522 ) | 521 ) |
523 | 522 |
524 // The census consists of counts of the quantities such as | 523 // The census consists of counts of the quantities such as |
525 // the current count of the free chunks, number of chunks | 524 // the current count of the free chunks, number of chunks |