src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @ 0:a61af66fc99e (jdk7-b24)

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children fcbfc50865ab

/*
 * Copyright 2006-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_mutableNUMASpace.cpp.incl"

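// The constructor starts with an empty set of per-lgrp spaces; update_layout(true)
// below populates one LGRPSpace per leaf locality group reported by the OS.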
MutableNUMASpace::MutableNUMASpace() {
  _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;
  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

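// Mangle the unallocated (top() to end()) part of every chunk and remember the touched
// pages as an invalid region, so that they can be freed and re-placed later.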
void MutableNUMASpace::mangle_unused_area() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
    if (top < s->end()) {
      ls->add_invalid_region(MemRegion(top, s->end()));
    }
    s->mangle_unused_area();
  }
}

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (!s->contains(top())) {
      if (s->free_in_words() > 0) {
        SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end()));
        size_t area_touched_words = pointer_delta(s->end(), s->top(), sizeof(HeapWordSize));
#ifndef ASSERT
        if (!ZapUnusedHeapArea) {
          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                    area_touched_words);
        }
#endif
        MemRegion invalid;
        HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
        HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
                                                     os::vm_page_size());
        if (crossing_start != crossing_end) {
          // If the object header crossed a small-page boundary, we mark the area
          // as invalid, rounding it to page_size().
          HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
          HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
                               s->end());
          invalid = MemRegion(start, end);
        }

        ls->add_invalid_region(invalid);
        s->set_top(s->end());
      }
    } else {
#ifdef ASSERT
      MemRegion invalid(s->top(), s->end());
      ls->add_invalid_region(invalid);
#else
      if (ZapUnusedHeapArea) {
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
      } else break;
#endif
    }
  }
}

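// The used and free sizes of the whole space are the sums over the per-lgrp chunks.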
size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}

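// TLABs are carved out of the chunk that belongs to the requesting thread's locality
// group; a thread whose group has no chunk (yet) gets a capacity of zero.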
size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  assert(lgrp_id != -1, "No lgrp_id set");
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  assert(lgrp_id != -1, "No lgrp_id set");
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology has changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i]));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

    if (changed) {
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}

// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start() % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size());
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start() % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
  }
}

// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      MutableSpace *s = lgrp_spaces()->at(i)->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
    }
    initialize(region(), true);
  } else {
    bool should_initialize = false;
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
        should_initialize = true;
        break;
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      initialize(region(), true);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}

// Scan pages. Free pages that have a smaller size than expected or the wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count)
{
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}

// Return the default chunk size by equally dividing the space.
// page_size() aligned.
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}
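// Illustrative example (numbers assumed): with base_space_size() == 1024 pages and
// 4 locality groups, default_chunk_size() == 256 * page_size().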

// Produce a new chunk size. page_size() aligned.
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() * pages_available / alloc_rate) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      chunk_size = MIN2((off_t)chunk_size, (off_t)current_chunk_size(i) + (off_t)limit);
    } else {
      chunk_size = MAX2((off_t)chunk_size, (off_t)current_chunk_size(i) - (off_t)limit);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}
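// Illustrative example (numbers assumed): if chunks i..n-1 have average allocation
// rates of 30, 20 and 10 MB/s and 90 pages are still available, chunk i is sized to
// roughly 30 / (30 + 20 + 10) * 90 == 45 pages, clamped to at least one page and,
// when a limit is given, moved by at most 'limit' bytes from its current size.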


// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                    MemRegion* bottom_region, MemRegion *top_region) {
  // Is there bottom?
  if (new_region.start() < intersection.start()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), os::large_page_size());
      if (new_region.contains(p)
          && pointer_delta(p, new_region.start(), sizeof(char)) >= os::large_page_size()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(p, intersection.end());
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *bottom_region = MemRegion(new_region.start(), intersection.start());
  } else {
    *bottom_region = MemRegion();
  }

  // Is there top?
  if (intersection.end() < new_region.end()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), os::large_page_size());
      if (new_region.contains(p)
          && pointer_delta(new_region.end(), p, sizeof(char)) >= os::large_page_size()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(intersection.start(), p);
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *top_region = MemRegion(intersection.end(), new_region.end());
  } else {
    *top_region = MemRegion();
  }
}
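// Illustrative example (numbers assumed): with 256K large pages, if the intersection
// starts 64K past a large-page boundary inside new_region, its start is moved up to
// the next boundary so the bottom tail ends on a large-page boundary and can be
// re-committed with the large page size by bias_region().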

// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. If the invalid region lies strictly inside the
// intersection, it cannot be merged into a tail: it is returned non-empty and
// aligned to the page_size() boundary so that the caller can free it separately.
// Otherwise the invalid part is absorbed by shrinking (or emptying) the
// intersection and an empty invalid_region is returned.
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
    *intersection = MemRegion(intersection->start(), invalid_region->start());
    *invalid_region = MemRegion();
  } else if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
    *intersection = MemRegion(new_region.start(), new_region.start());
    *invalid_region = MemRegion();
  } else if (intersection->contains(*invalid_region)) {
    // That's the only case we have to make an additional bias_region() call.
    HeapWord* start = invalid_region->start();
    HeapWord* end = invalid_region->end();
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord *p = (HeapWord*)round_down((intptr_t) start, os::large_page_size());
      if (new_region.contains(p)) {
        start = p;
      }
      p = (HeapWord*)round_to((intptr_t) end, os::large_page_size());
      if (new_region.contains(end)) {
        end = p;
      }
    }
    if (intersection->start() > start) {
      *intersection = MemRegion(start, intersection->end());
    }
    if (intersection->end() < end) {
      *intersection = MemRegion(intersection->start(), end);
    }
    *invalid_region = MemRegion(start, end);
  }
}

void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) {
  assert(clear_space, "Reallocation will destroy data!");
  assert(lgrp_spaces()->length() > 0, "There should be at least one space");

  MemRegion old_region = region(), new_region;
  set_bottom(mr.start());
  set_end(mr.end());
  MutableSpace::set_top(bottom());

  // Compute chunk sizes
  size_t prev_page_size = page_size();
  set_page_size(UseLargePages ? os::large_page_size() : os::vm_page_size());
  HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
  HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();

  // Try small pages if the chunk size is too small
  if (base_space_size_pages / lgrp_spaces()->length() == 0
      && page_size() > (size_t)os::vm_page_size()) {
    set_page_size(os::vm_page_size());
    rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
    rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
  }
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
  set_base_space_size(base_space_size_pages);

  // Handle space resize
  MemRegion top_region, bottom_region;
  if (!old_region.equals(region())) {
    new_region = MemRegion(rounded_bottom, rounded_end);
    MemRegion intersection = new_region.intersection(old_region);
    if (intersection.start() == NULL ||
        intersection.end() == NULL ||
        prev_page_size > page_size()) { // If the page size got smaller we have to change
                                        // the page size preference for the whole space.
      intersection = MemRegion(new_region.start(), new_region.start());
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    bias_region(bottom_region);
    bias_region(top_region);
  }

  // Check if the space layout has changed significantly.
  // This happens when the space has been resized so that either the head or the tail
  // chunk became smaller than a page.
  bool layout_valid = UseAdaptiveNUMAChunkSizing &&
                      current_chunk_size(0) > page_size() &&
                      current_chunk_size(lgrp_spaces()->length() - 1) > page_size();


  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    old_region = s->region();

    size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
    if (i < lgrp_spaces()->length() - 1) {
      if (!UseAdaptiveNUMAChunkSizing ||
          (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
          samples_count() < AdaptiveSizePolicyReadyThreshold) {
        // No adaptation. Divide the space equally.
        chunk_byte_size = default_chunk_size();
      } else if (!layout_valid || NUMASpaceResizeRate == 0) {
        // Fast adaptation. If no space resize rate is set, resize
        // the chunks instantly.
        chunk_byte_size = adaptive_chunk_size(i, 0);
      } else {
        // Slow adaptation. Resize the chunks moving no more than
        // NUMASpaceResizeRate bytes per collection.
        size_t limit = NUMASpaceResizeRate /
                       (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
        chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
      }
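      // Illustrative example (numbers assumed, not from the code above): with 3 chunks
      // and NUMASpaceResizeRate == 8M, limit == 8M / (3 * 4 / 2) == ~1.3M, so chunk 0
      // may move by at most ~1.3M per collection and chunk 1 by at most ~2.6M; the
      // last chunk simply takes whatever is left of the space.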

      assert(chunk_byte_size >= page_size(), "Chunk size too small");
      assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
    }

    if (i == 0) { // Bottom chunk
      if (i != lgrp_spaces()->length() - 1) {
        new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
      } else {
        new_region = MemRegion(bottom(), end());
      }
    } else if (i < lgrp_spaces()->length() - 1) { // Middle chunks
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(),
                             ps->end() + (chunk_byte_size >> LogHeapWordSize));
    } else { // Top chunk
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(), end());
    }
    guarantee(region().contains(new_region), "Region invariant");


    // The general case:
    // |---------------------|--invalid---|--------------------------|
    // |------------------new_region---------------------------------|
    // |----bottom_region--|---intersection---|------top_region------|
    // |----old_region----|
    // The intersection part has all pages in place; we don't need to migrate them.
    // Pages for the top and bottom parts should be freed and then reallocated.

    MemRegion intersection = old_region.intersection(new_region);

    if (intersection.start() == NULL || intersection.end() == NULL) {
      intersection = MemRegion(new_region.start(), new_region.start());
    }

    MemRegion invalid_region = ls->invalid_region().intersection(new_region);
    if (!invalid_region.is_empty()) {
      merge_regions(new_region, &intersection, &invalid_region);
      free_region(invalid_region);
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    free_region(bottom_region);
    free_region(top_region);

    // If we cleared the region it would be mangled in debug builds, which would cause
    // page allocation in a different place. Hence we set the top directly.
    s->initialize(new_region, false);
    s->set_top(s->bottom());

    ls->set_invalid_region(MemRegion());

    set_adaptation_cycles(samples_count());
  }
}

// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());

    if (s->contains(value)) {
      if (top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        s->set_top(s->bottom());
      } else {
        if (top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
  }
  MutableSpace::set_top(value);
}

void MutableNUMASpace::clear() {
  MutableSpace::set_top(bottom());
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    lgrp_spaces()->at(i)->space()->clear();
  }
}

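// Allocate from the chunk of the current thread's locality group; if that group has no
// chunk yet, fall back to a random one. The lock-free variant is cas_allocate() below.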
HeapWord* MutableNUMASpace::allocate(size_t size) {
  int lgrp_id = Thread::current()->lgrp_id();
  if (lgrp_id == -1) {
    lgrp_id = os::numa_get_group_id();
    Thread::current()->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);

  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }

  MutableSpace *s = lgrp_spaces()->at(i)->space();
  HeapWord *p = s->allocate(size);

  if (p != NULL && s->free_in_words() < (size_t)oopDesc::header_size()) {
    s->set_top(s->top() - size);
    p = NULL;
  }
  if (p != NULL) {
    if (top() < s->top()) { // Keep _top updated.
      MutableSpace::set_top(s->top());
    }
  }
  // Make the page allocation happen here.
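  // Touching one word per OS page commits the pages while the allocating thread is
  // running, so with first-touch placement they should end up on that thread's node.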
  if (p != NULL) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }

  return p;
}

// This version is lock-free.
HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  int lgrp_id = Thread::current()->lgrp_id();
  if (lgrp_id == -1) {
    lgrp_id = os::numa_get_group_id();
    Thread::current()->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  MutableSpace *s = lgrp_spaces()->at(i)->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL && s->free_in_words() < (size_t)oopDesc::header_size()) {
    if (s->cas_deallocate(p, size)) {
      // We were the last to allocate and created a fragment less than
      // a minimal object.
      p = NULL;
    }
  }
  if (p != NULL) {
    HeapWord* cur_top, *cur_chunk_top = p + size;
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
        break;
      }
    }
  }

  // Make the page allocation happen here.
  if (p != NULL) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  return p;
}

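// Print a one-line summary of each per-lgrp chunk; print_on() additionally reports
// the NUMAStats page-placement statistics gathered by accumulate_statistics().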
void MutableNUMASpace::print_short_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print(" (");
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
    lgrp_spaces()->at(i)->space()->print_short_on(st);
    if (i < lgrp_spaces()->length() - 1) {
      st->print(", ");
    }
  }
  st->print(")");
}

void MutableNUMASpace::print_on(outputStream* st) const {
  MutableSpace::print_on(st);
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    st->print(" lgrp %d", ls->lgrp_id());
    ls->space()->print_on(st);
    if (NUMAStats) {
      st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
                ls->space_stats()->_local_space / K,
                ls->space_stats()->_remote_space / K,
                ls->space_stats()->_unbiased_space / K,
                ls->space_stats()->_uncommited_space / K,
                ls->space_stats()->_large_pages,
                ls->space_stats()->_small_pages);
    }
  }
}

void MutableNUMASpace::verify(bool allow_dirty) const {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    lgrp_spaces()->at(i)->space()->verify(allow_dirty);
  }
}

// Scan pages and gather stats about page placement and size.
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        return;
      }
    }
  }
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));
}

// Scan page_count pages and verify whether they have the right size and placement.
// If invalid pages are found, they are freed in the hope that subsequent reallocation
// will be more successful.
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
{
  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);

  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);

  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)));
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}
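// The scan is incremental: each call resumes from last_page_scanned(), examines at
// most page_count pages, and remembers where it stopped; when the saved position
// falls outside the current chunk the scan wraps back to the chunk's beginning.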