comparison src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp @ 373:06df86c2ec37

6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
Summary: Treat a chunk where the allocation has failed as fully used.
Reviewed-by: ysr
author iveresov
date Sat, 27 Sep 2008 00:33:13 -0700
parents 850fdf70db2b
children 4e400c36026f
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp  365:5f44674206d3
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp  373:06df86c2ec37
@@ -58,10 +58,11 @@
   class LGRPSpace : public CHeapObj {
     int _lgrp_id;
     MutableSpace* _space;
     MemRegion _invalid_region;
     AdaptiveWeightedAverage *_alloc_rate;
+    bool _allocation_failed;
 
     struct SpaceStats {
       size_t _local_space, _remote_space, _unbiased_space, _uncommited_space;
       size_t _large_pages, _small_pages;
 
@@ -79,11 +80,11 @@
 
     char* _last_page_scanned;
     char* last_page_scanned() { return _last_page_scanned; }
     void set_last_page_scanned(char* p) { _last_page_scanned = p; }
    public:
-    LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL) {
+    LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL), _allocation_failed(false) {
       _space = new MutableSpace();
       _alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
     }
     ~LGRPSpace() {
       delete _space;
@@ -101,12 +102,25 @@
 
     static bool equals(void* lgrp_id_value, LGRPSpace* p) {
       return *(int*)lgrp_id_value == p->lgrp_id();
     }
 
+    // Report a failed allocation.
+    void set_allocation_failed() { _allocation_failed = true; }
+
     void sample() {
-      alloc_rate()->sample(space()->used_in_bytes());
+      // If there was a failed allocation make allocation rate equal
+      // to the size of the whole chunk. This ensures the progress of
+      // the adaptation process.
+      size_t alloc_rate_sample;
+      if (_allocation_failed) {
+        alloc_rate_sample = space()->capacity_in_bytes();
+        _allocation_failed = false;
+      } else {
+        alloc_rate_sample = space()->used_in_bytes();
+      }
+      alloc_rate()->sample(alloc_rate_sample);
     }
 
     MemRegion invalid_region() const { return _invalid_region; }
     void set_invalid_region(MemRegion r) { _invalid_region = r; }
     int lgrp_id() const { return _lgrp_id; }
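
The change to sample() above substitutes the chunk's full capacity for the used size only in the epoch where an allocation actually failed, and the flag is cleared immediately afterwards. A minimal standalone sketch of that sampling policy, using a hypothetical WeightedAverage in place of HotSpot's AdaptiveWeightedAverage and made-up sizes (an illustration only, not the HotSpot code):

    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-in for AdaptiveWeightedAverage: an exponentially
    // weighted moving average where `weight` percent of the value comes
    // from history and the rest from the newest sample.
    struct WeightedAverage {
      float    _avg;
      unsigned _weight;                       // e.g. NUMAChunkResizeWeight
      explicit WeightedAverage(unsigned weight) : _avg(0.0f), _weight(weight) {}
      void  sample(float s) { _avg = (_weight * _avg + (100 - _weight) * s) / 100.0f; }
      float average() const { return _avg; }
    };

    // The policy from the patch: a chunk that failed an allocation is
    // sampled as if it were fully used, so its measured allocation rate
    // cannot stall near zero just because allocations keep bouncing off it.
    struct ChunkSample {
      size_t used;               // bytes allocated in this chunk this epoch
      size_t capacity;           // current chunk size in bytes
      bool   allocation_failed;  // an allocation request bounced off the chunk
    };

    float next_sample(const ChunkSample& c) {
      return static_cast<float>(c.allocation_failed ? c.capacity : c.used);
    }

    int main() {
      WeightedAverage rate(20);
      ChunkSample failed = { 4 * 1024, 64 * 1024, true  };  // counts as 64K
      ChunkSample normal = { 4 * 1024, 64 * 1024, false };  // counts as  4K
      rate.sample(next_sample(failed));
      rate.sample(next_sample(normal));
      std::printf("weighted allocation rate = %.0f bytes\n", rate.average());
      return 0;
    }

Because the failed-allocation epoch contributes the whole chunk size to the average, the adaptive resizing logic sees a chunk that looks fully used and keeps adjusting it, instead of converging on a stale, too-small value.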
@@ -188,10 +202,13 @@
   virtual void set_top_for_allocations() PRODUCT_RETURN;
 
   virtual void ensure_parsability();
   virtual size_t used_in_words() const;
   virtual size_t free_in_words() const;
+
+  using MutableSpace::capacity_in_words;
+  virtual size_t capacity_in_words(Thread* thr) const;
   virtual size_t tlab_capacity(Thread* thr) const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 
   // Allocation (return NULL if full)
   virtual HeapWord* allocate(size_t word_size);
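
The second hunk adds a per-thread capacity query alongside the inherited no-argument MutableSpace::capacity_in_words(). The using-declaration is what keeps the base-class overload visible: in C++, declaring capacity_in_words(Thread*) in the derived class would otherwise hide it. A small self-contained illustration of that name-hiding rule (hypothetical types, not the HotSpot classes):

    #include <cstddef>
    #include <cstdio>

    struct Base {
      size_t capacity_in_words() const { return 100; }
    };

    struct Derived : Base {
      using Base::capacity_in_words;  // keeps the no-argument overload visible
      size_t capacity_in_words(int thread_id) const { return 10 + thread_id; }
    };

    int main() {
      Derived d;
      // Without the using-declaration the first call would not compile:
      // Derived::capacity_in_words(int) hides Base::capacity_in_words().
      std::printf("%zu %zu\n", d.capacity_in_words(), d.capacity_in_words(3));
      return 0;
    }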