comparison src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @ 141:fcbfc50865ab

6684395: Port NUMA-aware allocator to linux
Summary: NUMA-aware allocator port to Linux
Reviewed-by: jmasa, apetrusenko
author: iveresov
date: Tue, 29 Apr 2008 13:51:26 +0400
parents a61af66fc99e
children e3729351c946
--- 140:3febac328d82
+++ 141:fcbfc50865ab
@@ -44,13 +44,15 @@
 
 void MutableNUMASpace::mangle_unused_area() {
   for (int i = 0; i < lgrp_spaces()->length(); i++) {
     LGRPSpace *ls = lgrp_spaces()->at(i);
     MutableSpace *s = ls->space();
-    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
-    if (top < s->end()) {
-      ls->add_invalid_region(MemRegion(top, s->end()));
+    if (!os::numa_has_static_binding()) {
+      HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
+      if (top < s->end()) {
+        ls->add_invalid_region(MemRegion(top, s->end()));
+      }
     }
     s->mangle_unused_area();
   }
 }
 
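Most hunks in this change hinge on two new platform queries, os::numa_has_static_binding() and os::numa_has_group_homing(). Their real implementations are per-OS code outside this file; the stubs below, with hypothetical "_sketch" names, only illustrate the split that the diff and its comments describe, assuming Linux can bind memory to a node and has no home lgroup, while Solaris relies on first-touch placement and thread homing:

    // Illustrative stubs only; the real definitions live in the per-OS sources.
    // The LINUX macro is assumed here as the platform selector.
    static bool numa_has_static_binding_sketch() {
    #ifdef LINUX
      return true;    // memory can be bound to a node up front (e.g. via libnuma)
    #else
      return false;   // Solaris: page placement is decided at first touch
    #endif
    }

    static bool numa_has_group_homing_sketch() {
    #ifdef LINUX
      return false;   // no home lgroup on Linux; threads migrate freely
    #else
      return true;    // Solaris threads have a home lgroup
    #endif
    }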
@@ -68,36 +70,40 @@
         if (!ZapUnusedHeapArea) {
           area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                     area_touched_words);
         }
 #endif
-        MemRegion invalid;
-        HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
-        HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
-                                                     os::vm_page_size());
-        if (crossing_start != crossing_end) {
-          // If object header crossed a small page boundary we mark the area
-          // as invalid rounding it to a page_size().
-          HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
-          HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
-                               s->end());
-          invalid = MemRegion(start, end);
-        }
-
-        ls->add_invalid_region(invalid);
+        if (!os::numa_has_static_binding()) {
+          MemRegion invalid;
+          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
+          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
+                                                       os::vm_page_size());
+          if (crossing_start != crossing_end) {
+            // If object header crossed a small page boundary we mark the area
+            // as invalid rounding it to a page_size().
+            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
+            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
+                                 s->end());
+            invalid = MemRegion(start, end);
+          }
+
+          ls->add_invalid_region(invalid);
+        }
         s->set_top(s->end());
       }
     } else {
+      if (!os::numa_has_static_binding()) {
 #ifdef ASSERT
-      MemRegion invalid(s->top(), s->end());
-      ls->add_invalid_region(invalid);
+        MemRegion invalid(s->top(), s->end());
+        ls->add_invalid_region(invalid);
 #else
-      if (ZapUnusedHeapArea) {
-        MemRegion invalid(s->top(), s->end());
-        ls->add_invalid_region(invalid);
-      } else break;
+        if (ZapUnusedHeapArea) {
+          MemRegion invalid(s->top(), s->end());
+          ls->add_invalid_region(invalid);
+        } else break;
 #endif
+      }
     }
   }
 }
 
 size_t MutableNUMASpace::used_in_words() const {
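To make the crossing check above concrete, here is a small worked example with assumed sizes (4096-byte small pages, 8-byte HeapWords); the constant names and numbers are illustrative, not taken from the diff:

    #include <cstdint>

    constexpr uintptr_t kPage       = 4096;               // assumed os::vm_page_size()
    constexpr uintptr_t kTop        = 0x10000 - 8 * 8;    // top() sits 8 words below a page boundary
    constexpr uintptr_t kTouched    = 16 * 8;             // the filler object's header touches 16 words
    constexpr uintptr_t kCrossStart = (kTop + kPage - 1) & ~(kPage - 1);              // 0x10000
    constexpr uintptr_t kCrossEnd   = (kTop + kTouched + kPage - 1) & ~(kPage - 1);   // 0x11000
    static_assert(kCrossStart != kCrossEnd, "the header straddles a small-page boundary");
    // Because the two round-ups differ, the filler header would span two small pages,
    // so the code above records the surrounding page_size() range as an invalid region.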
@@ -192,21 +198,25 @@
   }
   return false;
 }
 
 // Bias region towards the first-touching lgrp. Set the right page sizes.
-void MutableNUMASpace::bias_region(MemRegion mr) {
+void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
   HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
   HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
   if (end > start) {
     MemRegion aligned_region(start, end);
     assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
            (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
     assert(region().contains(aligned_region), "Sanity");
-    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
-    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
-    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size());
+    // First we tell the OS which page size we want in the given range. The underlying
+    // large page can be broken down if we require small pages.
+    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
+    // Then we uncommit the pages in the range.
+    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
+    // And make them local/first-touch biased.
+    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
   }
 }
 
 // Free all pages in the region.
 void MutableNUMASpace::free_region(MemRegion mr) {
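The last call above now carries the lgroup id, so a platform with static binding can pin the freed range to that node. A minimal sketch of such a binding with libnuma follows; it assumes libnuma is available and the helper name is hypothetical, it is not the actual os::numa_make_local() implementation (which is platform code outside this file):

    #include <cstddef>
    #include <numa.h>   // libnuma: numa_available(), numa_tonode_memory()

    // Hypothetical helper, for illustration: bind a byte range to one NUMA node
    // so that future page faults in [addr, addr + bytes) are satisfied there.
    static void make_local_sketch(char* addr, size_t bytes, int node) {
      if (numa_available() != -1) {
        numa_tonode_memory(addr, bytes, node);
      }
    }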
@@ -231,14 +241,16 @@
       s->set_top(s->bottom());
     }
     initialize(region(), true);
   } else {
     bool should_initialize = false;
-    for (int i = 0; i < lgrp_spaces()->length(); i++) {
-      if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
-        should_initialize = true;
-        break;
+    if (!os::numa_has_static_binding()) {
+      for (int i = 0; i < lgrp_spaces()->length(); i++) {
+        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
+          should_initialize = true;
+          break;
+        }
       }
     }
 
     if (should_initialize ||
         (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
@@ -470,12 +482,12 @@
         prev_page_size > page_size()) { // If the page size got smaller we have to change
                                         // the page size preference for the whole space.
       intersection = MemRegion(new_region.start(), new_region.start());
     }
     select_tails(new_region, intersection, &bottom_region, &top_region);
-    bias_region(bottom_region);
-    bias_region(top_region);
+    bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
+    bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
   }
 
   // Check if the space layout has changed significantly?
   // This happens when the space has been resized so that either head or tail
   // chunk became less than a page.
@@ -543,26 +555,41 @@
 
     if (intersection.start() == NULL || intersection.end() == NULL) {
       intersection = MemRegion(new_region.start(), new_region.start());
     }
 
-    MemRegion invalid_region = ls->invalid_region().intersection(new_region);
-    if (!invalid_region.is_empty()) {
-      merge_regions(new_region, &intersection, &invalid_region);
-      free_region(invalid_region);
-    }
-    select_tails(new_region, intersection, &bottom_region, &top_region);
-    free_region(bottom_region);
-    free_region(top_region);
+    if (!os::numa_has_static_binding()) {
+      MemRegion invalid_region = ls->invalid_region().intersection(new_region);
+      // Invalid region is a range of memory that could've possibly
+      // been allocated on the other node. That's relevant only on Solaris where
+      // there is no static memory binding.
+      if (!invalid_region.is_empty()) {
+        merge_regions(new_region, &intersection, &invalid_region);
+        free_region(invalid_region);
+        ls->set_invalid_region(MemRegion());
+      }
+    }
+
+    select_tails(new_region, intersection, &bottom_region, &top_region);
+
+    if (!os::numa_has_static_binding()) {
+      // If that's a system with the first-touch policy then it's enough
+      // to free the pages.
+      free_region(bottom_region);
+      free_region(top_region);
+    } else {
+      // In a system with static binding we have to change the bias whenever
+      // we reshape the heap.
+      bias_region(bottom_region, ls->lgrp_id());
+      bias_region(top_region, ls->lgrp_id());
+    }
 
     // If we clear the region, we would mangle it in debug. That would cause page
     // allocation in a different place. Hence setting the top directly.
     s->initialize(new_region, false);
     s->set_top(s->bottom());
 
-    ls->set_invalid_region(MemRegion());
-
     set_adaptation_cycles(samples_count());
   }
 }
 
 // Set the top of the whole space.
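On the first-touch branch above it is enough to release the physical pages; the next write re-places each page on the node of the touching thread. The sketch below shows that idea on Linux with plain madvise, under the assumption of an anonymous private mapping; the helper name is hypothetical and this is not the os::free_memory() implementation:

    #include <cstddef>
    #include <sys/mman.h>   // madvise(), MADV_DONTNEED

    // Hypothetical helper: drop the physical pages backing an anonymous mapping
    // but keep the virtual range; the next store faults in fresh pages placed on
    // the node of the thread that touches them.
    static void free_pages_for_first_touch(char* addr, size_t bytes) {
      madvise(addr, bytes, MADV_DONTNEED);
    }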
@@ -573,23 +600,23 @@
     LGRPSpace *ls = lgrp_spaces()->at(i);
     MutableSpace *s = ls->space();
     HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
 
     if (s->contains(value)) {
-      if (top < value && top < s->end()) {
+      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
         ls->add_invalid_region(MemRegion(top, value));
       }
       s->set_top(value);
       found_top = true;
     } else {
       if (found_top) {
         s->set_top(s->bottom());
       } else {
-        if (top < s->end()) {
+        if (!os::numa_has_static_binding() && top < s->end()) {
           ls->add_invalid_region(MemRegion(top, s->end()));
         }
         s->set_top(s->end());
       }
     }
   }
   MutableSpace::set_top(value);
 }
@@ -599,15 +626,27 @@
   for (int i = 0; i < lgrp_spaces()->length(); i++) {
     lgrp_spaces()->at(i)->space()->clear();
   }
 }
 
+/*
+   Linux supports static memory binding, therefore the most part of the
+   logic dealing with the possible invalid page allocation is effectively
+   disabled. Besides there is no notion of the home node in Linux. A
+   thread is allowed to migrate freely. Although the scheduler is rather
+   reluctant to move threads between the nodes. We check for the current
+   node every allocation. And with a high probability a thread stays on
+   the same node for some time allowing local access to recently allocated
+   objects.
+ */
+
 HeapWord* MutableNUMASpace::allocate(size_t size) {
-  int lgrp_id = Thread::current()->lgrp_id();
-  if (lgrp_id == -1) {
+  Thread* thr = Thread::current();
+  int lgrp_id = thr->lgrp_id();
+  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
     lgrp_id = os::numa_get_group_id();
-    Thread::current()->set_lgrp_id(lgrp_id);
+    thr->set_lgrp_id(lgrp_id);
   }
 
   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
 
   // It is possible that a new CPU has been hotplugged and
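The block comment above says the current node is queried on every allocation (os::numa_get_group_id()). One way such a query can be answered on Linux is to map the CPU the thread is currently running on to its node; the sketch below uses glibc and libnuma calls, the helper name is hypothetical, and it is only an illustration of the idea, not the changeset's os_linux.cpp code:

    #include <sched.h>   // sched_getcpu()
    #include <numa.h>    // numa_node_of_cpu()

    // Hypothetical helper: the thread may migrate right after the call returns,
    // which is exactly the imprecision the comment above accepts.
    static int current_numa_node_sketch() {
      int cpu = sched_getcpu();                      // -1 if the kernel lacks support
      return cpu >= 0 ? numa_node_of_cpu(cpu) : 0;   // fall back to node 0
    }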
@@ -626,26 +665,26 @@
   if (p != NULL) {
     if (top() < s->top()) { // Keep _top updated.
       MutableSpace::set_top(s->top());
     }
   }
-  // Make the page allocation happen here.
-  if (p != NULL) {
+  // Make the page allocation happen here if there is no static binding..
+  if (p != NULL && !os::numa_has_static_binding()) {
     for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
       *(int*)i = 0;
     }
   }
-
   return p;
 }
 
 // This version is lock-free.
 HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
-  int lgrp_id = Thread::current()->lgrp_id();
-  if (lgrp_id == -1) {
+  Thread* thr = Thread::current();
+  int lgrp_id = thr->lgrp_id();
+  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
     lgrp_id = os::numa_get_group_id();
-    Thread::current()->set_lgrp_id(lgrp_id);
+    thr->set_lgrp_id(lgrp_id);
   }
 
   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
   // It is possible that a new CPU has been hotplugged and
   // we haven't reshaped the space accordingly.
@@ -668,12 +707,12 @@
         break;
       }
     }
   }
 
-  // Make the page allocation happen here.
-  if (p != NULL) {
+  // Make the page allocation happen here if there is no static binding.
+  if (p != NULL && !os::numa_has_static_binding() ) {
     for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
       *(int*)i = 0;
     }
   }
   return p;
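A note on the stride in the touching loops above: os::vm_page_size() >> LogHeapWordSize converts the small-page size from bytes to HeapWords, so the loop writes exactly one word per small page, which is enough to fault each page in on the allocating thread's node. With assumed values (64-bit VM, 4 KB small pages; the constant names below are illustrative) the arithmetic is:

    #include <cstddef>

    constexpr size_t kVMPageSize      = 4096;  // bytes per small page
    constexpr int    kLogHeapWordSize = 3;     // 8-byte HeapWords
    constexpr size_t kStrideInWords   = kVMPageSize >> kLogHeapWordSize;  // 4096 / 8 = 512
    static_assert(kStrideInWords == 512, "one zero-word store every 512 words, i.e. one per page");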