/*
 * Copyright 2006-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_mutableNUMASpace.cpp.incl"


MutableNUMASpace::MutableNUMASpace() {
  _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;
  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

void MutableNUMASpace::mangle_unused_area() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (!os::numa_has_static_binding()) {
      HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
      if (top < s->end()) {
        ls->add_invalid_region(MemRegion(top, s->end()));
      }
    }
    s->mangle_unused_area();
  }
}

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
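// Note on the clamping below: in product builds that do not zap the unused
// heap area, fill_region_with_object() only needs to write the filler
// object's header, so the size of the touched area is clamped to the header
// size. The page-crossing check then records an invalid region only when
// that small write may have landed on a page belonging to another node.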
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (!s->contains(top())) {
      if (s->free_in_words() > 0) {
        SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end()));
        size_t area_touched_words = pointer_delta(s->end(), s->top(), sizeof(HeapWord));
#ifndef ASSERT
        if (!ZapUnusedHeapArea) {
          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                    area_touched_words);
        }
#endif
        if (!os::numa_has_static_binding()) {
          MemRegion invalid;
          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
                                                       os::vm_page_size());
          if (crossing_start != crossing_end) {
            // If the object header crossed a small page boundary we mark the
            // area as invalid, rounding it to a page_size() boundary.
            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
                                 s->end());
            invalid = MemRegion(start, end);
          }

          ls->add_invalid_region(invalid);
        }
        s->set_top(s->end());
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else break;
#endif
      }
    }
  }
}

size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}

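// TLAB sizing is reported per locality group: the capacity and free
// estimates below refer to the chunk of the given thread's home lgrp,
// not to the whole space.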
size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  assert(lgrp_id != -1, "No lgrp_id set");
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  assert(lgrp_id != -1, "No lgrp_id set");
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter to true.
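// Returns true if the layout was updated, which tells the caller (see
// update()) that the chunks have to be reshaped from scratch.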
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology has changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes.
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i]));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

    if (changed) {
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}

// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    // First we tell the OS which page size we want in the given range. The underlying
    // large page can be broken down if we require small pages.
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // Then we uncommit the pages in the range.
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
    // And make them local/first-touch biased.
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
  }
}

// Update space layout. Perform adaptation.
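// The layout is rebuilt from scratch if the topology changed; otherwise
// initialize() is rerun either when invalid regions have to be scrubbed
// (no static binding) or when new allocation-rate samples have arrived
// since the last adaptation cycle.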
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      MutableSpace *s = lgrp_spaces()->at(i)->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
    }
    initialize(region(), true);
  } else {
    bool should_initialize = false;
    if (!os::numa_has_static_binding()) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
          should_initialize = true;
          break;
        }
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      initialize(region(), true);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}

// Scan pages. Free pages that have smaller size or wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count)
{
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
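// For example, with three chunks laid out end to end:
//   bottom()                                      end()
//   |------chunk 0------|---chunk 1---|---chunk 2---|
// current_chunk_size(1) is the byte distance between the end of chunk 0 and
// the end of chunk 1, so the sizes stay consistent while the ends move.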
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}

// Return the default chunk size by equally dividing the space.
// page_size() aligned.
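// Since base_space_size() is measured in pages, dividing it first and
// multiplying by page_size() afterwards keeps the result page-aligned. For
// example, 1000 pages split across 3 lgrps with a 4K page gives
// (1000 / 3) * 4096 = 333 pages = 1363968 bytes per chunk.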
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}

// Produce a new chunk size. page_size() aligned.
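// The chunk receives a share of the pages still available that is
// proportional to the allocation rate of its lgrp relative to the combined
// rate of this and all the following lgrps:
//   chunk_size(i) = rate(i) / sum(rate(j), j >= i) * pages_available * page_size()
// A non-zero limit caps how far the chunk may grow or shrink from its
// current size in a single step.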
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() * pages_available / alloc_rate) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      chunk_size = MIN2((off_t)chunk_size, (off_t)current_chunk_size(i) + (off_t)limit);
    } else {
      chunk_size = MAX2((off_t)chunk_size, (off_t)current_chunk_size(i) - (off_t)limit);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}

// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                    MemRegion* bottom_region, MemRegion *top_region) {
  // Is there bottom?
  if (new_region.start() < intersection.start()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), os::large_page_size());
      if (new_region.contains(p)
          && pointer_delta(p, new_region.start(), sizeof(char)) >= os::large_page_size()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(p, intersection.end());
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *bottom_region = MemRegion(new_region.start(), intersection.start());
  } else {
    *bottom_region = MemRegion();
  }

  // Is there top?
  if (intersection.end() < new_region.end()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), os::large_page_size());
      if (new_region.contains(p)
          && pointer_delta(new_region.end(), p, sizeof(char)) >= os::large_page_size()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(intersection.start(), p);
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *top_region = MemRegion(intersection.end(), new_region.end());
  } else {
    *top_region = MemRegion();
  }
}

// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. The invalid_region is returned non-empty (and
// page_size() aligned) only when it lies strictly inside the intersection;
// in all other cases it is merged away and returned empty.
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
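// Four cases are handled in turn: the invalid region overlaps the bottom of
// the intersection, overlaps its top, covers the whole intersection, or lies
// strictly inside it. Only the last case leaves a non-empty invalid_region
// for the caller to free and re-bias, which is why it may force an extra
// bias_region() call.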
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else
  if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
    *intersection = MemRegion(intersection->start(), invalid_region->start());
    *invalid_region = MemRegion();
  } else
  if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
    *intersection = MemRegion(new_region.start(), new_region.start());
    *invalid_region = MemRegion();
  } else
  if (intersection->contains(*invalid_region)) {
    // That's the only case where we have to make an additional bias_region() call.
    HeapWord* start = invalid_region->start();
    HeapWord* end = invalid_region->end();
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord *p = (HeapWord*)round_down((intptr_t) start, os::large_page_size());
      if (new_region.contains(p)) {
        start = p;
      }
      p = (HeapWord*)round_to((intptr_t) end, os::large_page_size());
      if (new_region.contains(p)) {
        end = p;
      }
    }
    if (intersection->start() > start) {
      *intersection = MemRegion(start, intersection->end());
    }
    if (intersection->end() < end) {
      *intersection = MemRegion(intersection->start(), end);
    }
    *invalid_region = MemRegion(start, end);
  }
}

void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) {
  assert(clear_space, "Reallocation will destroy data!");
  assert(lgrp_spaces()->length() > 0, "There should be at least one space");

  MemRegion old_region = region(), new_region;
  set_bottom(mr.start());
  set_end(mr.end());
  MutableSpace::set_top(bottom());

  // Compute chunk sizes.
  size_t prev_page_size = page_size();
  set_page_size(UseLargePages ? os::large_page_size() : os::vm_page_size());
  HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
  HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();

  // Try small pages if the chunk size is too small.
  if (base_space_size_pages / lgrp_spaces()->length() == 0
      && page_size() > (size_t)os::vm_page_size()) {
    set_page_size(os::vm_page_size());
    rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
    rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
  }
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
  set_base_space_size(base_space_size_pages);

  // Handle space resize.
  MemRegion top_region, bottom_region;
  if (!old_region.equals(region())) {
    new_region = MemRegion(rounded_bottom, rounded_end);
    MemRegion intersection = new_region.intersection(old_region);
    if (intersection.start() == NULL ||
        intersection.end() == NULL ||
        prev_page_size > page_size()) { // If the page size got smaller we have to change
                                        // the page size preference for the whole space.
      intersection = MemRegion(new_region.start(), new_region.start());
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
    bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
  }

  // Check if the space layout has changed significantly.
  // This happens when the space has been resized so that either head or tail
  // chunk became less than a page.
  bool layout_valid = UseAdaptiveNUMAChunkSizing &&
                      current_chunk_size(0) > page_size() &&
                      current_chunk_size(lgrp_spaces()->length() - 1) > page_size();

  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    old_region = s->region();

    size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
    if (i < lgrp_spaces()->length() - 1) {
      if (!UseAdaptiveNUMAChunkSizing ||
          (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
          samples_count() < AdaptiveSizePolicyReadyThreshold) {
        // No adaptation. Divide the space equally.
        chunk_byte_size = default_chunk_size();
      } else
      if (!layout_valid || NUMASpaceResizeRate == 0) {
        // Fast adaptation. If no space resize rate is set, resize
        // the chunks instantly.
        chunk_byte_size = adaptive_chunk_size(i, 0);
      } else {
        // Slow adaptation. Resize the chunks moving no more than
        // NUMASpaceResizeRate bytes per collection. The total budget is
        // divided by the triangular number n(n+1)/2; chunk i may move by up
        // to (i + 1) shares, since moving its boundary also shifts every
        // chunk above it.
        size_t limit = NUMASpaceResizeRate /
                       (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
        chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
      }

      assert(chunk_byte_size >= page_size(), "Chunk size too small");
      assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
    }

    if (i == 0) { // Bottom chunk
      if (i != lgrp_spaces()->length() - 1) {
        new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
      } else {
        new_region = MemRegion(bottom(), end());
      }
    } else
    if (i < lgrp_spaces()->length() - 1) { // Middle chunks
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(),
                             ps->end() + (chunk_byte_size >> LogHeapWordSize));
    } else { // Top chunk
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(), end());
    }
    guarantee(region().contains(new_region), "Region invariant");

    // The general case:
    // |---------------------|--invalid---|--------------------------|
    // |------------------new_region---------------------------------|
    // |----bottom_region--|---intersection---|------top_region------|
    //                     |----old_region----|
    // The intersection part has all pages in place; we don't need to migrate them.
    // Pages for the top and bottom part should be freed and then reallocated.

    MemRegion intersection = old_region.intersection(new_region);

    if (intersection.start() == NULL || intersection.end() == NULL) {
      intersection = MemRegion(new_region.start(), new_region.start());
    }

    if (!os::numa_has_static_binding()) {
      MemRegion invalid_region = ls->invalid_region().intersection(new_region);
      // The invalid region is a range of memory that could've possibly
      // been allocated on the other node. That's relevant only on Solaris where
      // there is no static memory binding.
      if (!invalid_region.is_empty()) {
        merge_regions(new_region, &intersection, &invalid_region);
        free_region(invalid_region);
        ls->set_invalid_region(MemRegion());
      }
    }

    select_tails(new_region, intersection, &bottom_region, &top_region);

    if (!os::numa_has_static_binding()) {
      // If that's a system with the first-touch policy then it's enough
      // to free the pages.
      free_region(bottom_region);
      free_region(top_region);
    } else {
      // In a system with static binding we have to change the bias whenever
      // we reshape the heap.
      bias_region(bottom_region, ls->lgrp_id());
      bias_region(top_region, ls->lgrp_id());
    }

    // If we clear the region, we would mangle it in debug. That would cause page
    // allocation in a different place. Hence setting the top directly.
    s->initialize(new_region, false);
    s->set_top(s->bottom());

    set_adaptation_cycles(samples_count());
  }
}

// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
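// Chunks that lie entirely below the new top keep their tops at their ends,
// the chunk that contains the value has its top set to it, and any chunk
// above is reset to empty.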
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());

    if (s->contains(value)) {
      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        s->set_top(s->bottom());
      } else {
        if (!os::numa_has_static_binding() && top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
  }
  MutableSpace::set_top(value);
}

void MutableNUMASpace::clear() {
  MutableSpace::set_top(bottom());
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    lgrp_spaces()->at(i)->space()->clear();
  }
}

/*
   Linux supports static memory binding, therefore most of the logic
   dealing with a possible invalid page allocation is effectively
   disabled. Besides, there is no notion of a home node in Linux. A
   thread is allowed to migrate freely, although the scheduler is rather
   reluctant to move threads between nodes. We check for the current
   node on every allocation, and with high probability a thread stays on
   the same node for some time, allowing local access to recently allocated
   objects.
 */

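// In allocate() and cas_allocate() below, when the OS has no static binding,
// the newly allocated words are touched one word per small page. The store
// forces the page to be materialized with first-touch placement, i.e. on the
// node of the allocating thread.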
HeapWord* MutableNUMASpace::allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);

  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }

  MutableSpace *s = lgrp_spaces()->at(i)->space();
  HeapWord *p = s->allocate(size);

  if (p != NULL && s->free_in_words() < (size_t)oopDesc::header_size()) {
    s->set_top(s->top() - size);
    p = NULL;
  }
  if (p != NULL) {
    if (top() < s->top()) { // Keep _top updated.
      MutableSpace::set_top(s->top());
    }
  }
  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  return p;
}

// This version is lock-free.
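// The global top() races with concurrent allocators: each thread proposes
// the end of its own block with a compare-and-swap and retries only while
// top() is still below that point, so top() only ever moves upward.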
HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  MutableSpace *s = lgrp_spaces()->at(i)->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL && s->free_in_words() < (size_t)oopDesc::header_size()) {
    if (s->cas_deallocate(p, size)) {
      // We were the last to allocate and created a fragment less than
      // a minimal object.
      p = NULL;
    }
  }
  if (p != NULL) {
    HeapWord* cur_top, *cur_chunk_top = p + size;
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
        break;
      }
    }
  }

  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  return p;
}

void MutableNUMASpace::print_short_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print(" (");
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
    lgrp_spaces()->at(i)->space()->print_short_on(st);
    if (i < lgrp_spaces()->length() - 1) {
      st->print(", ");
    }
  }
  st->print(")");
}

void MutableNUMASpace::print_on(outputStream* st) const {
  MutableSpace::print_on(st);
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    st->print(" lgrp %d", ls->lgrp_id());
    ls->space()->print_on(st);
    if (NUMAStats) {
      st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
                ls->space_stats()->_local_space / K,
                ls->space_stats()->_remote_space / K,
                ls->space_stats()->_unbiased_space / K,
                ls->space_stats()->_uncommited_space / K,
                ls->space_stats()->_large_pages,
                ls->space_stats()->_small_pages);
    }
  }
}

void MutableNUMASpace::verify(bool allow_dirty) const {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    lgrp_spaces()->at(i)->space()->verify(allow_dirty);
  }
}

// Scan pages and gather stats about page placement and size.
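// Note that the unbiased space accounted at the end is the unaligned head
// and tail of the chunk that fall outside the page-aligned [start, end)
// range and therefore never get biased to a node.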
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        return;
      }
    }
  }
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));
}

// Scan page_count pages and verify that they have the right size and placement.
// If invalid pages are found they are freed in the hope that subsequent
// reallocation will be more successful.
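// The scan is incremental: last_page_scanned() acts as a cursor, so each call
// inspects at most page_count pages and the next call resumes where the
// previous one stopped, wrapping back to the start of the range when needed.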
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
{
  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);

  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);

  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)));
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}
|