comparison src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @ 268:d6340ab4105b

6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
6723229: NUMA allocator: assert(lgrp_num > 0, "There should be at least one locality group")
Summary: The fix takes care of the assertion triggered during TLAB resizing after a topology reconfiguration. It also handles a defect in the topology graph, in which a single leaf node has no memory.
Reviewed-by: jmasa
author iveresov
date Thu, 17 Jul 2008 10:26:33 -0700
parents 12eea04c8b06
children 850fdf70db2b
comparing 267:9d6a3a6891f8 with 268:d6340ab4105b
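
In brief, the change replaces both assertions with a graceful fallback: a thread whose lgrp_id is still -1 (possible right after the system topology changes, before the thread's new home group is chosen on its first allocation) is reported an equal share of the whole space. A minimal standalone sketch of that fallback follows; fallback_share is a hypothetical name for illustration, not a HotSpot function:

#include <cassert>
#include <cstddef>

// Hypothetical distillation of the fallback path added by this change:
// if the caller's locality group is not yet known, report an equal
// share of the space, since the whole space is about to be
// reinitialized anyway.
std::size_t fallback_share(std::size_t total_bytes, int lgrp_count) {
  if (lgrp_count > 0) {
    return total_bytes / lgrp_count;  // equal split across groups
  }
  assert(false && "There should be at least one locality group");
  return 0;  // defensive value for release builds
}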
@@ -139,22 +139,43 @@


 size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
   guarantee(thr != NULL, "No thread");
   int lgrp_id = thr->lgrp_id();
-  assert(lgrp_id != -1, "No lgrp_id set");
+  if (lgrp_id == -1) {
+    // This case can occur after the topology of the system has
+    // changed. Threads can change their location; the new home
+    // group will be determined during the first allocation
+    // attempt. For now we can safely assume that all spaces
+    // have equal size because the whole space will be reinitialized.
+    if (lgrp_spaces()->length() > 0) {
+      return capacity_in_bytes() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
+  // That's the normal case, where we know the locality group of the thread.
   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
   if (i == -1) {
     return 0;
   }
   return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
 }

 size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
+  // Please see the comments for tlab_capacity().
   guarantee(thr != NULL, "No thread");
   int lgrp_id = thr->lgrp_id();
-  assert(lgrp_id != -1, "No lgrp_id set");
+  if (lgrp_id == -1) {
+    if (lgrp_spaces()->length() > 0) {
+      return free_in_bytes() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
   if (i == -1) {
     return 0;
   }
   return lgrp_spaces()->at(i)->space()->free_in_bytes();
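
unsafe_max_tlab_alloc() receives the same guard as tlab_capacity(), dividing free_in_bytes() rather than capacity_in_bytes(). As a quick standalone sanity check of the equal-split arithmetic (toy code, not HotSpot): a 512 MB space across 4 locality groups yields a 128 MB per-group estimate.

#include <cstdio>

int main() {
  const unsigned long long capacity = 512ULL * 1024 * 1024;  // 512 MB space
  const int groups = 4;                                      // locality groups
  // Equal-split estimate used while a thread's home group is unknown.
  std::printf("per-group share: %llu MB\n",
              capacity / groups / (1024 * 1024));
  return 0;  // prints: per-group share: 128 MB
}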
@@ -248,14 +269,19 @@

 // Update space layout. Perform adaptation.
 void MutableNUMASpace::update() {
   if (update_layout(false)) {
     // If the topology has changed, make all chunks zero-sized.
+    // And clear the alloc-rate statistics.
+    // In future we may want to handle this more gracefully in order
+    // to avoid the reallocation of the pages as much as possible.
     for (int i = 0; i < lgrp_spaces()->length(); i++) {
-      MutableSpace *s = lgrp_spaces()->at(i)->space();
+      LGRPSpace *ls = lgrp_spaces()->at(i);
+      MutableSpace *s = ls->space();
       s->set_end(s->bottom());
       s->set_top(s->bottom());
+      ls->clear_alloc_rate();
     }
     // A NUMA space is never mangled
     initialize(region(),
                SpaceDecorator::Clear,
                SpaceDecorator::DontMangle);
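
The update() hunk pairs the chunk reset with a statistics reset, so stale allocation rates from the old topology cannot skew the next adaptation cycle. A hedged sketch of that reset-on-reconfigure pattern, with hypothetical names (GroupState stands in for LGRPSpace and its alloc-rate average; it is not the HotSpot class):

#include <cstddef>
#include <vector>

// Sketch of the pattern: on a topology change, every group's chunk
// shrinks to zero and its allocation-rate statistic is cleared.
struct GroupState {
  double alloc_rate = 0.0;       // decaying average of the allocation rate
  std::size_t chunk_bytes = 0;   // current chunk owned by this group
  void reset() { alloc_rate = 0.0; chunk_bytes = 0; }
};

void on_topology_change(std::vector<GroupState>& groups) {
  for (GroupState& g : groups) {
    g.reset();  // zero the chunk and clear the rate statistic
  }
  // ...after which the whole space would be laid out again from scratch,
  // mirroring the initialize(region(), Clear, DontMangle) call above.
}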