src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @ 269:850fdf70db2b
(compared against revision 238:3df2fe7c4451)

Merge
author:   jmasa
date:     Mon, 28 Jul 2008 15:30:23 -0700
parents:  d1605aabd0a1 d6340ab4105b
children: 06df86c2ec37

--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
@@ -40,23 +40,35 @@
     delete lgrp_spaces()->at(i);
   }
   delete lgrp_spaces();
 }
 
-void MutableNUMASpace::mangle_unused_area() {
-  for (int i = 0; i < lgrp_spaces()->length(); i++) {
-    LGRPSpace *ls = lgrp_spaces()->at(i);
-    MutableSpace *s = ls->space();
-    if (!os::numa_has_static_binding()) {
-      HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
-      if (top < s->end()) {
-        ls->add_invalid_region(MemRegion(top, s->end()));
-      }
-    }
-    s->mangle_unused_area();
-  }
-}
+#ifndef PRODUCT
+void MutableNUMASpace::mangle_unused_area() {
+  // This method should do nothing.
+  // It can be called on a numa space during a full compaction.
+}
+void MutableNUMASpace::mangle_unused_area_complete() {
+  // This method should do nothing.
+  // It can be called on a numa space during a full compaction.
+}
+void MutableNUMASpace::mangle_region(MemRegion mr) {
+  // This method should do nothing because numa spaces are not mangled.
+}
+void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
+  assert(false, "Do not mangle MutableNUMASpace's");
+}
+void MutableNUMASpace::set_top_for_allocations() {
+  // This method should do nothing.
+}
+void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
+  // This method should do nothing.
+}
+void MutableNUMASpace::check_mangled_unused_area_complete() {
+  // This method should do nothing.
+}
+#endif // NOT_PRODUCT
 
 // There may be unallocated holes in the middle chunks
 // that should be filled with dead objects to ensure parseability.
 void MutableNUMASpace::ensure_parsability() {
   for (int i = 0; i < lgrp_spaces()->length(); i++) {
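The stubbed-out mangling above is not an omission: on Linux and Solaris an untouched page is physically placed on the memory node of the first thread that writes to it ("first-touch"), so a debug-time mangle pass would bind free heap pages to the wrong locality group. A minimal standalone sketch of the hazard follows; it is not HotSpot code, and the mmap'd block and 0xAB fill pattern are illustrative only.

    // Illustrative only: a debug-style "mangle" pass over fresh memory.
    // Every page the memset touches is first-touched by this thread, so
    // the OS binds it to this thread's memory node -- exactly what a
    // NUMA space must avoid for memory intended for other threads.
    #include <cstring>
    #include <sys/mman.h>

    int main() {
      const size_t len = 64 * 1024 * 1024;
      void* heap = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (heap == MAP_FAILED) return 1;
      std::memset(heap, 0xAB, len);  // the harmful "mangling" write
      munmap(heap, len);
      return 0;
    }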
@@ -127,22 +139,43 @@
 
 
 size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
   guarantee(thr != NULL, "No thread");
   int lgrp_id = thr->lgrp_id();
-  assert(lgrp_id != -1, "No lgrp_id set");
+  if (lgrp_id == -1) {
+    // This case can occur after the topology of the system has
+    // changed. Thread can change their location, the new home
+    // group will be determined during the first allocation
+    // attempt. For now we can safely assume that all spaces
+    // have equal size because the whole space will be reinitialized.
+    if (lgrp_spaces()->length() > 0) {
+      return capacity_in_bytes() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
+  // That's the normal case, where we know the locality group of the thread.
   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
   if (i == -1) {
     return 0;
   }
   return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
 }
 
 size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
+  // Please see the comments for tlab_capacity().
   guarantee(thr != NULL, "No thread");
   int lgrp_id = thr->lgrp_id();
-  assert(lgrp_id != -1, "No lgrp_id set");
+  if (lgrp_id == -1) {
+    if (lgrp_spaces()->length() > 0) {
+      return free_in_bytes() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
   if (i == -1) {
     return 0;
   }
   return lgrp_spaces()->at(i)->space()->free_in_bytes();
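Both fallback paths above assume that, right after a topology change, every locality group owns an equal share of the space. A standalone sketch of that arithmetic (the helper name is hypothetical): with a 512 MB space and 4 locality groups, a thread whose home group is still unknown is told it has a 128 MB share.

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Hypothetical helper mirroring the fallback: an equal share per group.
    static size_t per_group_share(size_t total_bytes, int num_groups) {
      assert(num_groups > 0 && "There should be at least one locality group");
      return total_bytes / static_cast<size_t>(num_groups);
    }

    int main() {
      std::printf("%zu\n", per_group_share(512u * 1024 * 1024, 4));  // 134217728
      return 0;
    }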
@@ -236,16 +269,24 @@
 
 // Update space layout. Perform adaptation.
 void MutableNUMASpace::update() {
   if (update_layout(false)) {
     // If the topology has changed, make all chunks zero-sized.
+    // And clear the alloc-rate statistics.
+    // In future we may want to handle this more gracefully in order
+    // to avoid the reallocation of the pages as much as possible.
     for (int i = 0; i < lgrp_spaces()->length(); i++) {
-      MutableSpace *s = lgrp_spaces()->at(i)->space();
+      LGRPSpace *ls = lgrp_spaces()->at(i);
+      MutableSpace *s = ls->space();
       s->set_end(s->bottom());
       s->set_top(s->bottom());
+      ls->clear_alloc_rate();
     }
-    initialize(region(), true);
+    // A NUMA space is never mangled
+    initialize(region(),
+               SpaceDecorator::Clear,
+               SpaceDecorator::DontMangle);
   } else {
     bool should_initialize = false;
     if (!os::numa_has_static_binding()) {
       for (int i = 0; i < lgrp_spaces()->length(); i++) {
         if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
@@ -255,11 +296,14 @@
       }
     }
 
     if (should_initialize ||
         (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
-      initialize(region(), true);
+      // A NUMA space is never mangled
+      initialize(region(),
+                 SpaceDecorator::Clear,
+                 SpaceDecorator::DontMangle);
     }
   }
 
   if (NUMAStats) {
     for (int i = 0; i < lgrp_spaces()->length(); i++) {
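The new clear_alloc_rate() call matters because each LGRPSpace tracks its recent allocation rate with a decaying average (in HotSpot, an AdaptiveWeightedAverage); stale samples from the old topology would otherwise skew the first adaptive chunk resize after reinitialization. A simplified stand-in, not the real class:

    // Simplified stand-in for the per-lgroup allocation-rate statistic
    // that clear_alloc_rate() resets after a topology change.
    class WeightedAverage {
      float _average;
      float _weight;  // fraction of each new sample folded in, e.g. 0.25f
     public:
      explicit WeightedAverage(float weight) : _average(0.0f), _weight(weight) {}
      void  sample(float value) { _average += _weight * (value - _average); }
      void  clear()             { _average = 0.0f; }  // what a topology change forces
      float average() const     { return _average; }
    };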
@@ -446,18 +490,21 @@
     }
     *invalid_region = MemRegion(start, end);
   }
 }
 
-void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) {
+void MutableNUMASpace::initialize(MemRegion mr,
+                                  bool clear_space,
+                                  bool mangle_space) {
   assert(clear_space, "Reallocation will destory data!");
   assert(lgrp_spaces()->length() > 0, "There should be at least one space");
 
   MemRegion old_region = region(), new_region;
   set_bottom(mr.start());
   set_end(mr.end());
-  MutableSpace::set_top(bottom());
+  // Must always clear the space
+  clear(SpaceDecorator::DontMangle);
 
   // Compute chunk sizes
   size_t prev_page_size = page_size();
   set_page_size(UseLargePages ? os::large_page_size() : os::vm_page_size());
   HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
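The bare boolean of the old signature becomes two named SpaceDecorator flags. The decorator class comes from the same change (spaceDecorator.hpp); it is reproduced here from memory, so treat the exact shape as approximate:

    // Approximate sketch of the flag holder used at the new call sites.
    // AllStatic is HotSpot's base class for non-instantiable utility classes.
    class SpaceDecorator : public AllStatic {
     public:
      // Initialization flags.
      static const bool Clear      = true;
      static const bool DontClear  = false;
      static const bool Mangle     = true;
      static const bool DontMangle = false;
    };

Passing SpaceDecorator::Clear and SpaceDecorator::DontMangle reads unambiguously at the call site, unlike the old initialize(region(), true).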
@@ -584,14 +631,12 @@
       // we reshape the heap.
       bias_region(bottom_region, ls->lgrp_id());
       bias_region(top_region, ls->lgrp_id());
     }
 
-    // If we clear the region, we would mangle it in debug. That would cause page
-    // allocation in a different place. Hence setting the top directly.
-    s->initialize(new_region, false);
-    s->set_top(s->bottom());
+    // Clear space (set top = bottom) but never mangle.
+    s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle);
 
     set_adaptation_cycles(samples_count());
   }
 }
 
@@ -639,14 +684,16 @@
     i++;
   }
   MutableSpace::set_top(value);
 }
 
-void MutableNUMASpace::clear() {
+void MutableNUMASpace::clear(bool mangle_space) {
   MutableSpace::set_top(bottom());
   for (int i = 0; i < lgrp_spaces()->length(); i++) {
-    lgrp_spaces()->at(i)->space()->clear();
+    // Never mangle NUMA spaces because the mangling will
+    // bind the memory to a possibly unwanted lgroup.
+    lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
   }
 }
 
 /*
   Linux supports static memory binding, therefore the most part of the
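The clear() change completes the pattern: callers now choose the mangling policy, and the NUMA space deliberately ignores a mangle request by always forwarding DontMangle. A self-contained sketch of that flag-forwarding pattern, with simplified stand-ins rather than HotSpot classes:

    #include <cstddef>
    #include <cstring>
    #include <vector>

    namespace Decorator {
      const bool Mangle     = true;   // debug builds poison freed memory
      const bool DontMangle = false;  // NUMA spaces always use this
    }

    class Space {
      std::vector<unsigned char> _mem;
      std::size_t _top;
     public:
      explicit Space(std::size_t bytes) : _mem(bytes), _top(0) {}
      void clear(bool mangle_space) {
        _top = 0;  // logical clear: top = bottom
        if (mangle_space) {
          // Writing the pattern would first-touch the pages (see above).
          std::memset(_mem.data(), 0xAB, _mem.size());
        }
      }
    };

    int main() {
      Space plain(1024), numa(1024);
      plain.clear(Decorator::Mangle);      // an ordinary space may mangle
      numa.clear(Decorator::DontMangle);   // a NUMA space never does
      return 0;
    }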