Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @ 21812:d25a7e8695dc
Merge
author | asaha |
---|---|
date | Tue, 14 Oct 2014 11:38:53 -0700 |
parents | 78bbf4d43a14 |
children | 52b4284cb496 |
rev | line source |
---|---|
0 | 1 |
2 /* | |
17937
78bbf4d43a14
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
17727
diff
changeset
|
3 * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. |
0 | 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
5 * | |
6 * This code is free software; you can redistribute it and/or modify it | |
7 * under the terms of the GNU General Public License version 2 only, as | |
8 * published by the Free Software Foundation. | |
9 * | |
10 * This code is distributed in the hope that it will be useful, but WITHOUT | |
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
13 * version 2 for more details (a copy is included in the LICENSE file that | |
14 * accompanied this code). | |
15 * | |
16 * You should have received a copy of the GNU General Public License version | |
17 * 2 along with this work; if not, write to the Free Software Foundation, | |
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
19 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
628
diff
changeset
|
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
628
diff
changeset
|
21 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
628
diff
changeset
|
22 * questions. |
0 | 23 * |
24 */ | |
25 | |
1972 | 26 #include "precompiled.hpp" |
27 #include "gc_implementation/shared/mutableNUMASpace.hpp" | |
28 #include "gc_implementation/shared/spaceDecorator.hpp" | |
29 #include "memory/sharedHeap.hpp" | |
30 #include "oops/oop.inline.hpp" | |
7180
f34d701e952e
8003935: Simplify the needed includes for using Thread::current()
stefank
parents:
6197
diff
changeset
|
31 #include "runtime/thread.inline.hpp" |
0 | 32 |
17937
78bbf4d43a14
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
17727
diff
changeset
|
33 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC |
78bbf4d43a14
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
17727
diff
changeset
|
34 |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
35 MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) { |
6197 | 36 _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true); |
0 | 37 _page_size = os::vm_page_size(); |
38 _adaptation_cycles = 0; | |
39 _samples_count = 0; | |
40 update_layout(true); | |
41 } | |
42 | |
43 MutableNUMASpace::~MutableNUMASpace() { | |
44 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
45 delete lgrp_spaces()->at(i); | |
46 } | |
47 delete lgrp_spaces(); | |
48 } | |
49 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
50 #ifndef PRODUCT |
0 | 51 void MutableNUMASpace::mangle_unused_area() { |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
52 // This method should do nothing. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
53 // It can be called on a numa space during a full compaction. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
54 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
55 void MutableNUMASpace::mangle_unused_area_complete() { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
56 // This method should do nothing. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
57 // It can be called on a numa space during a full compaction. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
58 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
59 void MutableNUMASpace::mangle_region(MemRegion mr) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
60 // This method should do nothing because numa spaces are not mangled. |
0 | 61 } |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
62 void MutableNUMASpace::set_top_for_allocations(HeapWord* v) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
63 assert(false, "Do not mangle MutableNUMASpace's"); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
64 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
65 void MutableNUMASpace::set_top_for_allocations() { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
66 // This method should do nothing. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
67 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
68 void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
69 // This method should do nothing. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
70 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
71 void MutableNUMASpace::check_mangled_unused_area_complete() { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
72 // This method should do nothing. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
73 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
74 #endif // NOT_PRODUCT |
0 | 75 |
76 // There may be unallocated holes in the middle chunks | |
77 // that should be filled with dead objects to ensure parseability. | |
78 void MutableNUMASpace::ensure_parsability() { | |
79 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
80 LGRPSpace *ls = lgrp_spaces()->at(i); | |
81 MutableSpace *s = ls->space(); | |
605 | 82 if (s->top() < top()) { // For all spaces preceding the one containing top() |
0 | 83 if (s->free_in_words() > 0) { |
5965
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
84 intptr_t cur_top = (intptr_t)s->top(); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
85 size_t words_left_to_fill = pointer_delta(s->end(), s->top());; |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
86 while (words_left_to_fill > 0) { |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
87 size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size()); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
88 assert(words_to_fill >= CollectedHeap::min_fill_size(), |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
89 err_msg("Remaining size ("SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")", |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
90 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size())); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
91 CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
92 if (!os::numa_has_static_binding()) { |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
93 size_t touched_words = words_to_fill; |
0 | 94 #ifndef ASSERT |
5965
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
95 if (!ZapUnusedHeapArea) { |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
96 touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)), |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
97 touched_words); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
98 } |
0 | 99 #endif |
5965
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
100 MemRegion invalid; |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
101 HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size()); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
102 HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size()); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
103 if (crossing_start != crossing_end) { |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
104 // If object header crossed a small page boundary we mark the area |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
105 // as invalid rounding it to a page_size(). |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
106 HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom()); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
107 HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end()); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
108 invalid = MemRegion(start, end); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
109 } |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
110 |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
111 ls->add_invalid_region(invalid); |
141 | 112 } |
5965
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
113 cur_top = cur_top + (words_to_fill * HeapWordSize); |
cc74fa5a91a9
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
brutisso
parents:
4734
diff
changeset
|
114 words_left_to_fill -= words_to_fill; |
0 | 115 } |
116 } | |
117 } else { | |
141 | 118 if (!os::numa_has_static_binding()) { |
0 | 119 #ifdef ASSERT |
120 MemRegion invalid(s->top(), s->end()); | |
121 ls->add_invalid_region(invalid); | |
141 | 122 #else |
123 if (ZapUnusedHeapArea) { | |
124 MemRegion invalid(s->top(), s->end()); | |
125 ls->add_invalid_region(invalid); | |
144
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
126 } else { |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
127 return; |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
128 } |
0 | 129 #endif |
144
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
130 } else { |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
131 return; |
141 | 132 } |
0 | 133 } |
134 } | |
135 } | |
136 | |
137 size_t MutableNUMASpace::used_in_words() const { | |
138 size_t s = 0; | |
139 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
140 s += lgrp_spaces()->at(i)->space()->used_in_words(); | |
141 } | |
142 return s; | |
143 } | |
144 | |
145 size_t MutableNUMASpace::free_in_words() const { | |
146 size_t s = 0; | |
147 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
148 s += lgrp_spaces()->at(i)->space()->free_in_words(); | |
149 } | |
150 return s; | |
151 } | |
152 | |
153 | |
154 size_t MutableNUMASpace::tlab_capacity(Thread *thr) const { | |
155 guarantee(thr != NULL, "No thread"); | |
156 int lgrp_id = thr->lgrp_id(); | |
268
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
157 if (lgrp_id == -1) { |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
158 // This case can occur after the topology of the system has |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
159 // changed. Thread can change their location, the new home |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
160 // group will be determined during the first allocation |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
161 // attempt. For now we can safely assume that all spaces |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
162 // have equal size because the whole space will be reinitialized. |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
163 if (lgrp_spaces()->length() > 0) { |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
164 return capacity_in_bytes() / lgrp_spaces()->length(); |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
165 } else { |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
166 assert(false, "There should be at least one locality group"); |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
167 return 0; |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
168 } |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
169 } |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
170 // That's the normal case, where we know the locality group of the thread. |
0 | 171 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
172 if (i == -1) { | |
173 return 0; | |
174 } | |
175 return lgrp_spaces()->at(i)->space()->capacity_in_bytes(); | |
176 } | |
177 | |
17727 | 178 size_t MutableNUMASpace::tlab_used(Thread *thr) const { |
179 // Please see the comments for tlab_capacity(). | |
180 guarantee(thr != NULL, "No thread"); | |
181 int lgrp_id = thr->lgrp_id(); | |
182 if (lgrp_id == -1) { | |
183 if (lgrp_spaces()->length() > 0) { | |
184 return (used_in_bytes()) / lgrp_spaces()->length(); | |
185 } else { | |
186 assert(false, "There should be at least one locality group"); | |
187 return 0; | |
188 } | |
189 } | |
190 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); | |
191 if (i == -1) { | |
192 return 0; | |
193 } | |
194 return lgrp_spaces()->at(i)->space()->used_in_bytes(); | |
195 } | |
196 | |
197 | |
0 | 198 size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const { |
268
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
199 // Please see the comments for tlab_capacity(). |
0 | 200 guarantee(thr != NULL, "No thread"); |
201 int lgrp_id = thr->lgrp_id(); | |
268
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
202 if (lgrp_id == -1) { |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
203 if (lgrp_spaces()->length() > 0) { |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
204 return free_in_bytes() / lgrp_spaces()->length(); |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
205 } else { |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
206 assert(false, "There should be at least one locality group"); |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
207 return 0; |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
208 } |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
209 } |
0 | 210 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
211 if (i == -1) { | |
212 return 0; | |
213 } | |
214 return lgrp_spaces()->at(i)->space()->free_in_bytes(); | |
215 } | |
216 | |
373
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
217 |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
218 size_t MutableNUMASpace::capacity_in_words(Thread* thr) const { |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
219 guarantee(thr != NULL, "No thread"); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
220 int lgrp_id = thr->lgrp_id(); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
221 if (lgrp_id == -1) { |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
222 if (lgrp_spaces()->length() > 0) { |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
223 return capacity_in_words() / lgrp_spaces()->length(); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
224 } else { |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
225 assert(false, "There should be at least one locality group"); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
226 return 0; |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
227 } |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
228 } |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
229 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
230 if (i == -1) { |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
231 return 0; |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
232 } |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
233 return lgrp_spaces()->at(i)->space()->capacity_in_words(); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
234 } |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
235 |
0 | 236 // Check if the NUMA topology has changed. Add and remove spaces if needed. |
237 // The update can be forced by setting the force parameter equal to true. | |
238 bool MutableNUMASpace::update_layout(bool force) { | |
239 // Check if the topology had changed. | |
240 bool changed = os::numa_topology_changed(); | |
241 if (force || changed) { | |
242 // Compute lgrp intersection. Add/remove spaces. | |
243 int lgrp_limit = (int)os::numa_get_groups_num(); | |
6197 | 244 int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC); |
0 | 245 int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit); |
246 assert(lgrp_num > 0, "There should be at least one locality group"); | |
247 // Add new spaces for the new nodes | |
248 for (int i = 0; i < lgrp_num; i++) { | |
249 bool found = false; | |
250 for (int j = 0; j < lgrp_spaces()->length(); j++) { | |
251 if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) { | |
252 found = true; | |
253 break; | |
254 } | |
255 } | |
256 if (!found) { | |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
257 lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment())); |
0 | 258 } |
259 } | |
260 | |
261 // Remove spaces for the removed nodes. | |
262 for (int i = 0; i < lgrp_spaces()->length();) { | |
263 bool found = false; | |
264 for (int j = 0; j < lgrp_num; j++) { | |
265 if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) { | |
266 found = true; | |
267 break; | |
268 } | |
269 } | |
270 if (!found) { | |
271 delete lgrp_spaces()->at(i); | |
272 lgrp_spaces()->remove_at(i); | |
273 } else { | |
274 i++; | |
275 } | |
276 } | |
277 | |
6197 | 278 FREE_C_HEAP_ARRAY(int, lgrp_ids, mtGC); |
0 | 279 |
280 if (changed) { | |
281 for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) { | |
282 thread->set_lgrp_id(-1); | |
283 } | |
284 } | |
285 return true; | |
286 } | |
287 return false; | |
288 } | |
289 | |
290 // Bias region towards the first-touching lgrp. Set the right page sizes. | |
141 | 291 void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) { |
0 | 292 HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size()); |
293 HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size()); | |
294 if (end > start) { | |
295 MemRegion aligned_region(start, end); | |
296 assert((intptr_t)aligned_region.start() % page_size() == 0 && | |
297 (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment"); | |
298 assert(region().contains(aligned_region), "Sanity"); | |
141 | 299 // First we tell the OS which page size we want in the given range. The underlying |
300 // large page can be broken down if we require small pages. | |
0 | 301 os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size()); |
141 | 302 // Then we uncommit the pages in the range. |
4734
20bfb6d15a94
7124829: NUMA: memory leak on Linux with large pages
iveresov
parents:
3960
diff
changeset
|
303 os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size()); |
141 | 304 // And make them local/first-touch biased. |
305 os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id); | |
0 | 306 } |
307 } | |
308 | |
309 // Free all pages in the region. | |
310 void MutableNUMASpace::free_region(MemRegion mr) { | |
311 HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size()); | |
312 HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size()); | |
313 if (end > start) { | |
314 MemRegion aligned_region(start, end); | |
315 assert((intptr_t)aligned_region.start() % page_size() == 0 && | |
316 (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment"); | |
317 assert(region().contains(aligned_region), "Sanity"); | |
4734
20bfb6d15a94
7124829: NUMA: memory leak on Linux with large pages
iveresov
parents:
3960
diff
changeset
|
318 os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size()); |
0 | 319 } |
320 } | |
321 | |
322 // Update space layout. Perform adaptation. | |
323 void MutableNUMASpace::update() { | |
324 if (update_layout(false)) { | |
325 // If the topology has changed, make all chunks zero-sized. | |
268
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
326 // And clear the alloc-rate statistics. |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
327 // In future we may want to handle this more gracefully in order |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
328 // to avoid the reallocation of the pages as much as possible. |
0 | 329 for (int i = 0; i < lgrp_spaces()->length(); i++) { |
268
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
330 LGRPSpace *ls = lgrp_spaces()->at(i); |
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
331 MutableSpace *s = ls->space(); |
0 | 332 s->set_end(s->bottom()); |
333 s->set_top(s->bottom()); | |
268
d6340ab4105b
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
iveresov
parents:
263
diff
changeset
|
334 ls->clear_alloc_rate(); |
0 | 335 } |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
336 // A NUMA space is never mangled |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
337 initialize(region(), |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
338 SpaceDecorator::Clear, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
339 SpaceDecorator::DontMangle); |
0 | 340 } else { |
341 bool should_initialize = false; | |
141 | 342 if (!os::numa_has_static_binding()) { |
343 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
344 if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) { | |
345 should_initialize = true; | |
346 break; | |
347 } | |
0 | 348 } |
349 } | |
350 | |
351 if (should_initialize || | |
352 (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) { | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
353 // A NUMA space is never mangled |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
354 initialize(region(), |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
355 SpaceDecorator::Clear, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
356 SpaceDecorator::DontMangle); |
0 | 357 } |
358 } | |
359 | |
360 if (NUMAStats) { | |
361 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
362 lgrp_spaces()->at(i)->accumulate_statistics(page_size()); | |
363 } | |
364 } | |
365 | |
366 scan_pages(NUMAPageScanRate); | |
367 } | |
368 | |
369 // Scan pages. Free pages that have smaller size or wrong placement. | |
370 void MutableNUMASpace::scan_pages(size_t page_count) | |
371 { | |
372 size_t pages_per_chunk = page_count / lgrp_spaces()->length(); | |
373 if (pages_per_chunk > 0) { | |
374 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
375 LGRPSpace *ls = lgrp_spaces()->at(i); | |
376 ls->scan_pages(page_size(), pages_per_chunk); | |
377 } | |
378 } | |
379 } | |
380 | |
381 // Accumulate statistics about the allocation rate of each lgrp. | |
382 void MutableNUMASpace::accumulate_statistics() { | |
383 if (UseAdaptiveNUMAChunkSizing) { | |
384 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
385 lgrp_spaces()->at(i)->sample(); | |
386 } | |
387 increment_samples_count(); | |
388 } | |
389 | |
390 if (NUMAStats) { | |
391 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
392 lgrp_spaces()->at(i)->accumulate_statistics(page_size()); | |
393 } | |
394 } | |
395 } | |
396 | |
397 // Get the current size of a chunk. | |
398 // This function computes the size of the chunk based on the | |
399 // difference between chunk ends. This allows it to work correctly in | |
400 // case the whole space is resized and during the process of adaptive | |
401 // chunk resizing. | |
402 size_t MutableNUMASpace::current_chunk_size(int i) { | |
403 HeapWord *cur_end, *prev_end; | |
404 if (i == 0) { | |
405 prev_end = bottom(); | |
406 } else { | |
407 prev_end = lgrp_spaces()->at(i - 1)->space()->end(); | |
408 } | |
409 if (i == lgrp_spaces()->length() - 1) { | |
410 cur_end = end(); | |
411 } else { | |
412 cur_end = lgrp_spaces()->at(i)->space()->end(); | |
413 } | |
414 if (cur_end > prev_end) { | |
415 return pointer_delta(cur_end, prev_end, sizeof(char)); | |
416 } | |
417 return 0; | |
418 } | |
419 | |
420 // Return the default chunk size by equally diving the space. | |
421 // page_size() aligned. | |
422 size_t MutableNUMASpace::default_chunk_size() { | |
423 return base_space_size() / lgrp_spaces()->length() * page_size(); | |
424 } | |
425 | |
426 // Produce a new chunk size. page_size() aligned. | |
391
ab4a7734b9c4
6753547: NUMA allocator: Invalid chunk size computation during adaptive resizing
iveresov
parents:
373
diff
changeset
|
427 // This function is expected to be called on sequence of i's from 0 to |
ab4a7734b9c4
6753547: NUMA allocator: Invalid chunk size computation during adaptive resizing
iveresov
parents:
373
diff
changeset
|
428 // lgrp_spaces()->length(). |
0 | 429 size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) { |
430 size_t pages_available = base_space_size(); | |
431 for (int j = 0; j < i; j++) { | |
432 pages_available -= round_down(current_chunk_size(j), page_size()) / page_size(); | |
433 } | |
434 pages_available -= lgrp_spaces()->length() - i - 1; | |
435 assert(pages_available > 0, "No pages left"); | |
436 float alloc_rate = 0; | |
437 for (int j = i; j < lgrp_spaces()->length(); j++) { | |
438 alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average(); | |
439 } | |
440 size_t chunk_size = 0; | |
441 if (alloc_rate > 0) { | |
442 LGRPSpace *ls = lgrp_spaces()->at(i); | |
391
ab4a7734b9c4
6753547: NUMA allocator: Invalid chunk size computation during adaptive resizing
iveresov
parents:
373
diff
changeset
|
443 chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size(); |
0 | 444 } |
445 chunk_size = MAX2(chunk_size, page_size()); | |
446 | |
447 if (limit > 0) { | |
448 limit = round_down(limit, page_size()); | |
449 if (chunk_size > current_chunk_size(i)) { | |
462
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
450 size_t upper_bound = pages_available * page_size(); |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
451 if (upper_bound > limit && |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
452 current_chunk_size(i) < upper_bound - limit) { |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
453 // The resulting upper bound should not exceed the available |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
454 // amount of memory (pages_available * page_size()). |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
455 upper_bound = current_chunk_size(i) + limit; |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
456 } |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
457 chunk_size = MIN2(chunk_size, upper_bound); |
0 | 458 } else { |
462
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
459 size_t lower_bound = page_size(); |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
460 if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow. |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
461 lower_bound = current_chunk_size(i) - limit; |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
462 } |
85f1b9537f70
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
391
diff
changeset
|
463 chunk_size = MAX2(chunk_size, lower_bound); |
0 | 464 } |
465 } | |
466 assert(chunk_size <= pages_available * page_size(), "Chunk size out of range"); | |
467 return chunk_size; | |
468 } | |
469 | |
470 | |
471 // Return the bottom_region and the top_region. Align them to page_size() boundary. | |
472 // |------------------new_region---------------------------------| | |
473 // |----bottom_region--|---intersection---|------top_region------| | |
474 void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection, | |
475 MemRegion* bottom_region, MemRegion *top_region) { | |
476 // Is there bottom? | |
477 if (new_region.start() < intersection.start()) { // Yes | |
478 // Try to coalesce small pages into a large one. | |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
479 if (UseLargePages && page_size() >= alignment()) { |
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
480 HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment()); |
0 | 481 if (new_region.contains(p) |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
482 && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) { |
0 | 483 if (intersection.contains(p)) { |
484 intersection = MemRegion(p, intersection.end()); | |
485 } else { | |
486 intersection = MemRegion(p, p); | |
487 } | |
488 } | |
489 } | |
490 *bottom_region = MemRegion(new_region.start(), intersection.start()); | |
491 } else { | |
492 *bottom_region = MemRegion(); | |
493 } | |
494 | |
495 // Is there top? | |
496 if (intersection.end() < new_region.end()) { // Yes | |
497 // Try to coalesce small pages into a large one. | |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
498 if (UseLargePages && page_size() >= alignment()) { |
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
499 HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment()); |
0 | 500 if (new_region.contains(p) |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
501 && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) { |
0 | 502 if (intersection.contains(p)) { |
503 intersection = MemRegion(intersection.start(), p); | |
504 } else { | |
505 intersection = MemRegion(p, p); | |
506 } | |
507 } | |
508 } | |
509 *top_region = MemRegion(intersection.end(), new_region.end()); | |
510 } else { | |
511 *top_region = MemRegion(); | |
512 } | |
513 } | |
514 | |
515 // Try to merge the invalid region with the bottom or top region by decreasing | |
516 // the intersection area. Return the invalid_region aligned to the page_size() | |
517 // boundary if it's inside the intersection. Return non-empty invalid_region | |
518 // if it lies inside the intersection (also page-aligned). | |
519 // |------------------new_region---------------------------------| | |
520 // |----------------|-------invalid---|--------------------------| | |
521 // |----bottom_region--|---intersection---|------top_region------| | |
522 void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection, | |
523 MemRegion *invalid_region) { | |
524 if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) { | |
525 *intersection = MemRegion(invalid_region->end(), intersection->end()); | |
526 *invalid_region = MemRegion(); | |
527 } else | |
528 if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) { | |
529 *intersection = MemRegion(intersection->start(), invalid_region->start()); | |
530 *invalid_region = MemRegion(); | |
531 } else | |
532 if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) { | |
533 *intersection = MemRegion(new_region.start(), new_region.start()); | |
534 *invalid_region = MemRegion(); | |
535 } else | |
536 if (intersection->contains(invalid_region)) { | |
537 // That's the only case we have to make an additional bias_region() call. | |
538 HeapWord* start = invalid_region->start(); | |
539 HeapWord* end = invalid_region->end(); | |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
540 if (UseLargePages && page_size() >= alignment()) { |
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
541 HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment()); |
0 | 542 if (new_region.contains(p)) { |
543 start = p; | |
544 } | |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
545 p = (HeapWord*)round_to((intptr_t) end, alignment()); |
0 | 546 if (new_region.contains(end)) { |
547 end = p; | |
548 } | |
549 } | |
550 if (intersection->start() > start) { | |
551 *intersection = MemRegion(start, intersection->end()); | |
552 } | |
553 if (intersection->end() < end) { | |
554 *intersection = MemRegion(intersection->start(), end); | |
555 } | |
556 *invalid_region = MemRegion(start, end); | |
557 } | |
558 } | |
559 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
560 void MutableNUMASpace::initialize(MemRegion mr, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
561 bool clear_space, |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
562 bool mangle_space, |
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
563 bool setup_pages) { |
0 | 564 assert(clear_space, "Reallocation will destory data!"); |
565 assert(lgrp_spaces()->length() > 0, "There should be at least one space"); | |
566 | |
567 MemRegion old_region = region(), new_region; | |
568 set_bottom(mr.start()); | |
569 set_end(mr.end()); | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
570 // Must always clear the space |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
571 clear(SpaceDecorator::DontMangle); |
0 | 572 |
573 // Compute chunk sizes | |
574 size_t prev_page_size = page_size(); | |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
575 set_page_size(UseLargePages ? alignment() : os::vm_page_size()); |
0 | 576 HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size()); |
577 HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size()); | |
578 size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size(); | |
579 | |
580 // Try small pages if the chunk size is too small | |
581 if (base_space_size_pages / lgrp_spaces()->length() == 0 | |
582 && page_size() > (size_t)os::vm_page_size()) { | |
583 set_page_size(os::vm_page_size()); | |
584 rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size()); | |
585 rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size()); | |
586 base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size(); | |
587 } | |
588 guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small"); | |
589 set_base_space_size(base_space_size_pages); | |
590 | |
591 // Handle space resize | |
592 MemRegion top_region, bottom_region; | |
593 if (!old_region.equals(region())) { | |
594 new_region = MemRegion(rounded_bottom, rounded_end); | |
595 MemRegion intersection = new_region.intersection(old_region); | |
596 if (intersection.start() == NULL || | |
597 intersection.end() == NULL || | |
598 prev_page_size > page_size()) { // If the page size got smaller we have to change | |
599 // the page size preference for the whole space. | |
600 intersection = MemRegion(new_region.start(), new_region.start()); | |
601 } | |
602 select_tails(new_region, intersection, &bottom_region, &top_region); | |
141 | 603 bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id()); |
604 bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id()); | |
0 | 605 } |
606 | |
607 // Check if the space layout has changed significantly? | |
608 // This happens when the space has been resized so that either head or tail | |
609 // chunk became less than a page. | |
610 bool layout_valid = UseAdaptiveNUMAChunkSizing && | |
611 current_chunk_size(0) > page_size() && | |
612 current_chunk_size(lgrp_spaces()->length() - 1) > page_size(); | |
613 | |
614 | |
615 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
616 LGRPSpace *ls = lgrp_spaces()->at(i); | |
617 MutableSpace *s = ls->space(); | |
618 old_region = s->region(); | |
619 | |
620 size_t chunk_byte_size = 0, old_chunk_byte_size = 0; | |
621 if (i < lgrp_spaces()->length() - 1) { | |
622 if (!UseAdaptiveNUMAChunkSizing || | |
623 (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) || | |
624 samples_count() < AdaptiveSizePolicyReadyThreshold) { | |
625 // No adaptation. Divide the space equally. | |
626 chunk_byte_size = default_chunk_size(); | |
627 } else | |
628 if (!layout_valid || NUMASpaceResizeRate == 0) { | |
629 // Fast adaptation. If no space resize rate is set, resize | |
630 // the chunks instantly. | |
631 chunk_byte_size = adaptive_chunk_size(i, 0); | |
632 } else { | |
633 // Slow adaptation. Resize the chunks moving no more than | |
634 // NUMASpaceResizeRate bytes per collection. | |
635 size_t limit = NUMASpaceResizeRate / | |
636 (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2); | |
637 chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size())); | |
638 } | |
639 | |
640 assert(chunk_byte_size >= page_size(), "Chunk size too small"); | |
641 assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check"); | |
642 } | |
643 | |
644 if (i == 0) { // Bottom chunk | |
645 if (i != lgrp_spaces()->length() - 1) { | |
646 new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize)); | |
647 } else { | |
648 new_region = MemRegion(bottom(), end()); | |
649 } | |
650 } else | |
651 if (i < lgrp_spaces()->length() - 1) { // Middle chunks | |
652 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space(); | |
653 new_region = MemRegion(ps->end(), | |
654 ps->end() + (chunk_byte_size >> LogHeapWordSize)); | |
655 } else { // Top chunk | |
656 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space(); | |
657 new_region = MemRegion(ps->end(), end()); | |
658 } | |
659 guarantee(region().contains(new_region), "Region invariant"); | |
660 | |
661 | |
662 // The general case: | |
663 // |---------------------|--invalid---|--------------------------| | |
664 // |------------------new_region---------------------------------| | |
665 // |----bottom_region--|---intersection---|------top_region------| | |
666 // |----old_region----| | |
667 // The intersection part has all pages in place we don't need to migrate them. | |
668 // Pages for the top and bottom part should be freed and then reallocated. | |
669 | |
670 MemRegion intersection = old_region.intersection(new_region); | |
671 | |
672 if (intersection.start() == NULL || intersection.end() == NULL) { | |
673 intersection = MemRegion(new_region.start(), new_region.start()); | |
674 } | |
675 | |
141 | 676 if (!os::numa_has_static_binding()) { |
677 MemRegion invalid_region = ls->invalid_region().intersection(new_region); | |
678 // Invalid region is a range of memory that could've possibly | |
679 // been allocated on the other node. That's relevant only on Solaris where | |
680 // there is no static memory binding. | |
681 if (!invalid_region.is_empty()) { | |
682 merge_regions(new_region, &intersection, &invalid_region); | |
683 free_region(invalid_region); | |
684 ls->set_invalid_region(MemRegion()); | |
685 } | |
0 | 686 } |
141 | 687 |
0 | 688 select_tails(new_region, intersection, &bottom_region, &top_region); |
141 | 689 |
690 if (!os::numa_has_static_binding()) { | |
691 // If that's a system with the first-touch policy then it's enough | |
692 // to free the pages. | |
693 free_region(bottom_region); | |
694 free_region(top_region); | |
695 } else { | |
696 // In a system with static binding we have to change the bias whenever | |
697 // we reshape the heap. | |
698 bias_region(bottom_region, ls->lgrp_id()); | |
699 bias_region(top_region, ls->lgrp_id()); | |
700 } | |
0 | 701 |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
702 // Clear space (set top = bottom) but never mangle. |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
703 s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages); |
0 | 704 |
705 set_adaptation_cycles(samples_count()); | |
706 } | |
707 } | |
708 | |
709 // Set the top of the whole space. | |
710 // Mark the the holes in chunks below the top() as invalid. | |
711 void MutableNUMASpace::set_top(HeapWord* value) { | |
712 bool found_top = false; | |
190
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
713 for (int i = 0; i < lgrp_spaces()->length();) { |
0 | 714 LGRPSpace *ls = lgrp_spaces()->at(i); |
715 MutableSpace *s = ls->space(); | |
716 HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom()); | |
717 | |
718 if (s->contains(value)) { | |
190
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
719 // Check if setting the chunk's top to a given value would create a hole less than |
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
720 // a minimal object; assuming that's not the last chunk in which case we don't care. |
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
721 if (i < lgrp_spaces()->length() - 1) { |
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
722 size_t remainder = pointer_delta(s->end(), value); |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
462
diff
changeset
|
723 const size_t min_fill_size = CollectedHeap::min_fill_size(); |
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
462
diff
changeset
|
724 if (remainder < min_fill_size && remainder > 0) { |
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
462
diff
changeset
|
725 // Add a minimum size filler object; it will cross the chunk boundary. |
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
462
diff
changeset
|
726 CollectedHeap::fill_with_object(value, min_fill_size); |
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
462
diff
changeset
|
727 value += min_fill_size; |
190
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
728 assert(!s->contains(value), "Should be in the next chunk"); |
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
729 // Restart the loop from the same chunk, since the value has moved |
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
730 // to the next one. |
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
731 continue; |
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
732 } |
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
733 } |
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
734 |
141 | 735 if (!os::numa_has_static_binding() && top < value && top < s->end()) { |
0 | 736 ls->add_invalid_region(MemRegion(top, value)); |
737 } | |
738 s->set_top(value); | |
739 found_top = true; | |
740 } else { | |
741 if (found_top) { | |
742 s->set_top(s->bottom()); | |
743 } else { | |
141 | 744 if (!os::numa_has_static_binding() && top < s->end()) { |
745 ls->add_invalid_region(MemRegion(top, s->end())); | |
746 } | |
747 s->set_top(s->end()); | |
0 | 748 } |
749 } | |
190
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
750 i++; |
0 | 751 } |
752 MutableSpace::set_top(value); | |
753 } | |
754 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
755 void MutableNUMASpace::clear(bool mangle_space) { |
0 | 756 MutableSpace::set_top(bottom()); |
757 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
758 // Never mangle NUMA spaces because the mangling will |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
759 // bind the memory to a possibly unwanted lgroup. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
760 lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle); |
0 | 761 } |
762 } | |
763 | |
141 | 764 /* |
765 Linux supports static memory binding, therefore the most part of the | |
766 logic dealing with the possible invalid page allocation is effectively | |
767 disabled. Besides there is no notion of the home node in Linux. A | |
768 thread is allowed to migrate freely. Although the scheduler is rather | |
769 reluctant to move threads between the nodes. We check for the current | |
770 node every allocation. And with a high probability a thread stays on | |
771 the same node for some time allowing local access to recently allocated | |
772 objects. | |
773 */ | |
774 | |
0 | 775 HeapWord* MutableNUMASpace::allocate(size_t size) { |
141 | 776 Thread* thr = Thread::current(); |
777 int lgrp_id = thr->lgrp_id(); | |
778 if (lgrp_id == -1 || !os::numa_has_group_homing()) { | |
0 | 779 lgrp_id = os::numa_get_group_id(); |
141 | 780 thr->set_lgrp_id(lgrp_id); |
0 | 781 } |
782 | |
783 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); | |
784 | |
785 // It is possible that a new CPU has been hotplugged and | |
786 // we haven't reshaped the space accordingly. | |
787 if (i == -1) { | |
788 i = os::random() % lgrp_spaces()->length(); | |
789 } | |
790 | |
373
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
791 LGRPSpace* ls = lgrp_spaces()->at(i); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
792 MutableSpace *s = ls->space(); |
0 | 793 HeapWord *p = s->allocate(size); |
794 | |
144
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
795 if (p != NULL) { |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
796 size_t remainder = s->free_in_words(); |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
628
diff
changeset
|
797 if (remainder < CollectedHeap::min_fill_size() && remainder > 0) { |
144
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
798 s->set_top(s->top() - size); |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
799 p = NULL; |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
800 } |
0 | 801 } |
802 if (p != NULL) { | |
803 if (top() < s->top()) { // Keep _top updated. | |
804 MutableSpace::set_top(s->top()); | |
805 } | |
806 } | |
141 | 807 // Make the page allocation happen here if there is no static binding.. |
808 if (p != NULL && !os::numa_has_static_binding()) { | |
0 | 809 for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) { |
810 *(int*)i = 0; | |
811 } | |
812 } | |
373
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
813 if (p == NULL) { |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
814 ls->set_allocation_failed(); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
815 } |
0 | 816 return p; |
817 } | |
818 | |
819 // This version is lock-free. | |
820 HeapWord* MutableNUMASpace::cas_allocate(size_t size) { | |
141 | 821 Thread* thr = Thread::current(); |
822 int lgrp_id = thr->lgrp_id(); | |
823 if (lgrp_id == -1 || !os::numa_has_group_homing()) { | |
0 | 824 lgrp_id = os::numa_get_group_id(); |
141 | 825 thr->set_lgrp_id(lgrp_id); |
0 | 826 } |
827 | |
828 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); | |
829 // It is possible that a new CPU has been hotplugged and | |
830 // we haven't reshaped the space accordingly. | |
831 if (i == -1) { | |
832 i = os::random() % lgrp_spaces()->length(); | |
833 } | |
373
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
834 LGRPSpace *ls = lgrp_spaces()->at(i); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
835 MutableSpace *s = ls->space(); |
0 | 836 HeapWord *p = s->cas_allocate(size); |
144
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
837 if (p != NULL) { |
190
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
838 size_t remainder = pointer_delta(s->end(), p + size); |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
628
diff
changeset
|
839 if (remainder < CollectedHeap::min_fill_size() && remainder > 0) { |
144
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
840 if (s->cas_deallocate(p, size)) { |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
841 // We were the last to allocate and created a fragment less than |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
842 // a minimal object. |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
843 p = NULL; |
190
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
844 } else { |
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
845 guarantee(false, "Deallocation should always succeed"); |
144
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
846 } |
0 | 847 } |
848 } | |
849 if (p != NULL) { | |
850 HeapWord* cur_top, *cur_chunk_top = p + size; | |
851 while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated. | |
852 if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) { | |
853 break; | |
854 } | |
855 } | |
856 } | |
857 | |
141 | 858 // Make the page allocation happen here if there is no static binding. |
859 if (p != NULL && !os::numa_has_static_binding() ) { | |
0 | 860 for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) { |
861 *(int*)i = 0; | |
862 } | |
863 } | |
373
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
864 if (p == NULL) { |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
865 ls->set_allocation_failed(); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
866 } |
0 | 867 return p; |
868 } | |
869 | |
870 void MutableNUMASpace::print_short_on(outputStream* st) const { | |
871 MutableSpace::print_short_on(st); | |
872 st->print(" ("); | |
873 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
874 st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id()); | |
875 lgrp_spaces()->at(i)->space()->print_short_on(st); | |
876 if (i < lgrp_spaces()->length() - 1) { | |
877 st->print(", "); | |
878 } | |
879 } | |
880 st->print(")"); | |
881 } | |
882 | |
883 void MutableNUMASpace::print_on(outputStream* st) const { | |
884 MutableSpace::print_on(st); | |
885 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
886 LGRPSpace *ls = lgrp_spaces()->at(i); | |
887 st->print(" lgrp %d", ls->lgrp_id()); | |
888 ls->space()->print_on(st); | |
889 if (NUMAStats) { | |
144
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
890 for (int i = 0; i < lgrp_spaces()->length(); i++) { |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
891 lgrp_spaces()->at(i)->accumulate_statistics(page_size()); |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
892 } |
0 | 893 st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n", |
894 ls->space_stats()->_local_space / K, | |
895 ls->space_stats()->_remote_space / K, | |
896 ls->space_stats()->_unbiased_space / K, | |
897 ls->space_stats()->_uncommited_space / K, | |
898 ls->space_stats()->_large_pages, | |
899 ls->space_stats()->_small_pages); | |
900 } | |
901 } | |
902 } | |
903 | |
// Verify the space contents.
void MutableNUMASpace::verify() {
  // This can be called after setting an arbitrary value to the space's top,
  // so an object can cross the chunk boundary. We ensure the parsability
  // of the space and just walk the objects in linear fashion.
  ensure_parsability();
  MutableSpace::verify();
}
911 | |
912 // Scan pages and gather stats about page placement and size. | |
913 void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) { | |
914 clear_space_stats(); | |
915 char *start = (char*)round_to((intptr_t) space()->bottom(), page_size); | |
916 char* end = (char*)round_down((intptr_t) space()->end(), page_size); | |
917 if (start < end) { | |
918 for (char *p = start; p < end;) { | |
919 os::page_info info; | |
920 if (os::get_page_info(p, &info)) { | |
921 if (info.size > 0) { | |
922 if (info.size > (size_t)os::vm_page_size()) { | |
923 space_stats()->_large_pages++; | |
924 } else { | |
925 space_stats()->_small_pages++; | |
926 } | |
927 if (info.lgrp_id == lgrp_id()) { | |
928 space_stats()->_local_space += info.size; | |
929 } else { | |
930 space_stats()->_remote_space += info.size; | |
931 } | |
932 p += info.size; | |
933 } else { | |
934 p += os::vm_page_size(); | |
935 space_stats()->_uncommited_space += os::vm_page_size(); | |
936 } | |
937 } else { | |
938 return; | |
939 } | |
940 } | |
941 } | |
942 space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) + | |
943 pointer_delta(space()->end(), end, sizeof(char)); | |
944 | |
945 } | |
946 | |
947 // Scan page_count pages and verify if they have the right size and right placement. | |
948 // If invalid pages are found they are freed in hope that subsequent reallocation | |
949 // will be more successful. | |
950 void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count) | |
951 { | |
952 char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size); | |
953 char* range_end = (char*)round_down((intptr_t) space()->end(), page_size); | |
954 | |
955 if (range_start > last_page_scanned() || last_page_scanned() >= range_end) { | |
956 set_last_page_scanned(range_start); | |
957 } | |
958 | |
959 char *scan_start = last_page_scanned(); | |
960 char* scan_end = MIN2(scan_start + page_size * page_count, range_end); | |
961 | |
962 os::page_info page_expected, page_found; | |
963 page_expected.size = page_size; | |
964 page_expected.lgrp_id = lgrp_id(); | |
965 | |
966 char *s = scan_start; | |
967 while (s < scan_end) { | |
968 char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found); | |
969 if (e == NULL) { | |
970 break; | |
971 } | |
972 if (e != scan_end) { | |
8739
ca9580859cf4
8004697: SIGSEGV on Solaris sparc with -XX:+UseNUMA
stefank
parents:
7180
diff
changeset
|
973 assert(e < scan_end, err_msg("e: " PTR_FORMAT " scan_end: " PTR_FORMAT, e, scan_end)); |
ca9580859cf4
8004697: SIGSEGV on Solaris sparc with -XX:+UseNUMA
stefank
parents:
7180
diff
changeset
|
974 |
0 | 975 if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id()) |
976 && page_expected.size != 0) { | |
4734
20bfb6d15a94
7124829: NUMA: memory leak on Linux with large pages
iveresov
parents:
3960
diff
changeset
|
977 os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size); |
0 | 978 } |
979 page_expected = page_found; | |
980 } | |
981 s = e; | |
982 } | |
983 | |
984 set_last_page_scanned(scan_end); | |
985 } |