annotate src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @ 10374:87c64c0438fb
6976350: G1: deal with fragmentation while copying objects during GC
Summary: Create G1ParGCAllocBufferContainer to hold two buffers instead of the previous single buffer, so that the first-priority buffer can be retained longer. When a large object does not fit in the free space left in the first-priority buffer, it can instead be placed in the second-priority buffer, while the first-priority buffer keeps getting chances to satisfy smaller allocations. Overall, this improves heap space efficiency.
Reviewed-by: johnc, jmasa, brutisso
Contributed-by: tamao <tao.mao@oracle.com>
author   | tamao
date     | Mon, 03 Jun 2013 14:37:13 -0700
parents  | ca9580859cf4
children | de6a9e811145
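The two-buffer scheme can be pictured with a minimal sketch (hypothetical, simplified types; the real G1ParGCAllocBufferContainer lives in the G1 sources, not in the file annotated below): allocation is tried in the retained first-priority buffer, and a large object that misses there spills into the second-priority buffer instead of forcing the first one to be retired early.

    // Minimal sketch of the two-buffer idea, with hypothetical types;
    // not the actual HotSpot implementation.
    #include <cstddef>

    struct Buffer {
      std::size_t free_words;  // words still available in this buffer

      bool allocate(std::size_t word_size) {
        if (word_size > free_words) return false;
        free_words -= word_size;
        return true;
      }
    };

    struct TwoBufferContainer {
      Buffer first;   // first-priority buffer, kept around for small objects
      Buffer second;  // second-priority buffer, absorbs the large spills

      bool allocate(std::size_t word_size) {
        // Try the retained buffer first; only objects that do not fit
        // there consume space in the second buffer.
        return first.allocate(word_size) || second.allocate(word_size);
      }
    };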
/*
 * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/mutableNUMASpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/thread.inline.hpp"

MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
  _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;
  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

#ifndef PRODUCT
void MutableNUMASpace::mangle_unused_area() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_unused_area_complete() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_region(MemRegion mr) {
  // This method should do nothing because numa spaces are not mangled.
}
void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
  assert(false, "Do not mangle MutableNUMASpace's");
}
void MutableNUMASpace::set_top_for_allocations() {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area_complete() {
  // This method should do nothing.
}
#endif // NOT_PRODUCT

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
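// Filling proceeds in chunks of at most filler_array_max_size() words,
// because a single filler object (a dummy int array) cannot be larger
// than that.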
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
            err_msg("Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
            words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()));
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If the object header crossed a small page boundary we mark the area
              // as invalid, rounding it to page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}

size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}


size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // This case can occur after the topology of the system has
    // changed. Threads can change their location; the new home
    // group will be determined during the first allocation
    // attempt. For now we can safely assume that all spaces
    // have equal size because the whole space will be reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  // That's the normal case, where we know the locality group of the thread.
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return free_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}


size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_words() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_words();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology had changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids, mtGC);

    if (changed) {
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}
// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start() % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    // First we tell the OS which page size we want in the given range. The underlying
    // large page can be broken down if we require small pages.
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // Then we uncommit the pages in the range.
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // And make them local/first-touch biased.
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start() % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
  }
}
// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized.
    // And clear the alloc-rate statistics.
    // In the future we may want to handle this more gracefully in order
    // to avoid the reallocation of the pages as much as possible.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      MutableSpace *s = ls->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
      ls->clear_alloc_rate();
    }
    // A NUMA space is never mangled
    initialize(region(),
               SpaceDecorator::Clear,
               SpaceDecorator::DontMangle);
  } else {
    bool should_initialize = false;
    if (!os::numa_has_static_binding()) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
          should_initialize = true;
          break;
        }
      }
    }

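    // Re-run initialize() either to recover regions invalidated above, or
    // when new allocation-rate samples have arrived since the last
    // adaptation (adaptation_cycles() lags samples_count() in that case).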
    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      // A NUMA space is never mangled
      initialize(region(),
                 SpaceDecorator::Clear,
                 SpaceDecorator::DontMangle);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}
// Scan pages. Free pages that have smaller size or wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count)
{
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}
// Return the default chunk size by equally dividing the space.
// page_size() aligned.
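// Example: with base_space_size() == 1024 small pages, four locality
// groups, and a 4K page size, each chunk defaults to (1024 / 4) * 4K = 1M.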
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}
// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on sequence of i's from 0 to
// lgrp_spaces()->length().
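// The new chunk is sized proportionally: this group's share of the pages
// still available after chunks 0..i-1 equals its fraction of the summed
// allocation rate of groups i..n-1. A non-zero limit caps how far the
// chunk may grow or shrink from its current size in one resize step.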
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      size_t upper_bound = pages_available * page_size();
      if (upper_bound > limit &&
          current_chunk_size(i) < upper_bound - limit) {
        // The resulting upper bound should not exceed the available
        // amount of memory (pages_available * page_size()).
        upper_bound = current_chunk_size(i) + limit;
      }
      chunk_size = MIN2(chunk_size, upper_bound);
    } else {
      size_t lower_bound = page_size();
      if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
        lower_bound = current_chunk_size(i) - limit;
      }
      chunk_size = MAX2(chunk_size, lower_bound);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}

// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                    MemRegion* bottom_region, MemRegion *top_region) {
  // Is there bottom?
  if (new_region.start() < intersection.start()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
      if (new_region.contains(p)
          && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(p, intersection.end());
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *bottom_region = MemRegion(new_region.start(), intersection.start());
  } else {
    *bottom_region = MemRegion();
  }

  // Is there top?
  if (intersection.end() < new_region.end()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
      if (new_region.contains(p)
          && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(intersection.start(), p);
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *top_region = MemRegion(intersection.end(), new_region.end());
  } else {
    *top_region = MemRegion();
  }
}
// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. Return the invalid_region aligned to the page_size()
// boundary if it's inside the intersection. Return non-empty invalid_region
// if it lies inside the intersection (also page-aligned).
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
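// Four cases are handled below: the invalid region overlaps the start of
// the intersection, overlaps its end, covers it entirely, or lies strictly
// inside it.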
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else
  if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
    *intersection = MemRegion(intersection->start(), invalid_region->start());
    *invalid_region = MemRegion();
  } else
  if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
    *intersection = MemRegion(new_region.start(), new_region.start());
    *invalid_region = MemRegion();
  } else
  if (intersection->contains(invalid_region)) {
    // That's the only case we have to make an additional bias_region() call.
    HeapWord* start = invalid_region->start();
    HeapWord* end = invalid_region->end();
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
      if (new_region.contains(p)) {
        start = p;
      }
      p = (HeapWord*)round_to((intptr_t) end, alignment());
      if (new_region.contains(end)) {
        end = p;
      }
    }
    if (intersection->start() > start) {
      *intersection = MemRegion(start, intersection->end());
    }
    if (intersection->end() < end) {
      *intersection = MemRegion(intersection->start(), end);
    }
    *invalid_region = MemRegion(start, end);
  }
}
void MutableNUMASpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space,
                                  bool setup_pages) {
  assert(clear_space, "Reallocation will destroy data!");
  assert(lgrp_spaces()->length() > 0, "There should be at least one space");

  MemRegion old_region = region(), new_region;
  set_bottom(mr.start());
  set_end(mr.end());
  // Must always clear the space
  clear(SpaceDecorator::DontMangle);

  // Compute chunk sizes
  size_t prev_page_size = page_size();
  set_page_size(UseLargePages ? alignment() : os::vm_page_size());
  HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
  HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();

  // Try small pages if the chunk size is too small
  if (base_space_size_pages / lgrp_spaces()->length() == 0
      && page_size() > (size_t)os::vm_page_size()) {
    set_page_size(os::vm_page_size());
    rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
    rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
  }
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
  set_base_space_size(base_space_size_pages);

  // Handle space resize
  MemRegion top_region, bottom_region;
  if (!old_region.equals(region())) {
    new_region = MemRegion(rounded_bottom, rounded_end);
    MemRegion intersection = new_region.intersection(old_region);
    if (intersection.start() == NULL ||
        intersection.end() == NULL ||
        prev_page_size > page_size()) { // If the page size got smaller we have to change
                                        // the page size preference for the whole space.
      intersection = MemRegion(new_region.start(), new_region.start());
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
    bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
  }

  // Check if the space layout has changed significantly.
  // This happens when the space has been resized so that either head or tail
  // chunk became less than a page.
  bool layout_valid = UseAdaptiveNUMAChunkSizing &&
                      current_chunk_size(0) > page_size() &&
                      current_chunk_size(lgrp_spaces()->length() - 1) > page_size();


  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    old_region = s->region();

    size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
    if (i < lgrp_spaces()->length() - 1) {
      if (!UseAdaptiveNUMAChunkSizing ||
          (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
          samples_count() < AdaptiveSizePolicyReadyThreshold) {
        // No adaptation. Divide the space equally.
        chunk_byte_size = default_chunk_size();
      } else
      if (!layout_valid || NUMASpaceResizeRate == 0) {
        // Fast adaptation. If no space resize rate is set, resize
        // the chunks instantly.
        chunk_byte_size = adaptive_chunk_size(i, 0);
      } else {
        // Slow adaptation. Resize the chunks moving no more than
        // NUMASpaceResizeRate bytes per collection.
        size_t limit = NUMASpaceResizeRate /
                       (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
        chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
      }

      assert(chunk_byte_size >= page_size(), "Chunk size too small");
      assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
    }

    if (i == 0) { // Bottom chunk
      if (i != lgrp_spaces()->length() - 1) {
        new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
      } else {
        new_region = MemRegion(bottom(), end());
      }
    } else
    if (i < lgrp_spaces()->length() - 1) { // Middle chunks
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(),
                             ps->end() + (chunk_byte_size >> LogHeapWordSize));
    } else { // Top chunk
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(), end());
    }
    guarantee(region().contains(new_region), "Region invariant");


    // The general case:
    // |---------------------|--invalid---|--------------------------|
    // |------------------new_region---------------------------------|
    // |----bottom_region--|---intersection---|------top_region------|
    // |----old_region----|
    // The intersection part has all pages in place; we don't need to migrate them.
    // Pages for the top and bottom part should be freed and then reallocated.

    MemRegion intersection = old_region.intersection(new_region);

    if (intersection.start() == NULL || intersection.end() == NULL) {
      intersection = MemRegion(new_region.start(), new_region.start());
    }

    if (!os::numa_has_static_binding()) {
      MemRegion invalid_region = ls->invalid_region().intersection(new_region);
      // Invalid region is a range of memory that could've possibly
      // been allocated on the other node. That's relevant only on Solaris where
      // there is no static memory binding.
      if (!invalid_region.is_empty()) {
        merge_regions(new_region, &intersection, &invalid_region);
        free_region(invalid_region);
        ls->set_invalid_region(MemRegion());
      }
    }

    select_tails(new_region, intersection, &bottom_region, &top_region);

    if (!os::numa_has_static_binding()) {
      // If that's a system with the first-touch policy then it's enough
      // to free the pages.
      free_region(bottom_region);
      free_region(top_region);
    } else {
      // In a system with static binding we have to change the bias whenever
      // we reshape the heap.
      bias_region(bottom_region, ls->lgrp_id());
      bias_region(top_region, ls->lgrp_id());
    }

    // Clear space (set top = bottom) but never mangle.
    s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);

    set_adaptation_cycles(samples_count());
  }
}

// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  for (int i = 0; i < lgrp_spaces()->length();) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());

    if (s->contains(value)) {
      // Check if setting the chunk's top to a given value would create a hole less than
      // a minimal object; assuming that's not the last chunk in which case we don't care.
      if (i < lgrp_spaces()->length() - 1) {
        size_t remainder = pointer_delta(s->end(), value);
        const size_t min_fill_size = CollectedHeap::min_fill_size();
        if (remainder < min_fill_size && remainder > 0) {
          // Add a minimum size filler object; it will cross the chunk boundary.
          CollectedHeap::fill_with_object(value, min_fill_size);
          value += min_fill_size;
          assert(!s->contains(value), "Should be in the next chunk");
          // Restart the loop from the same chunk, since the value has moved
          // to the next one.
          continue;
        }
      }

      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        s->set_top(s->bottom());
      } else {
        if (!os::numa_has_static_binding() && top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
    i++;
  }
  MutableSpace::set_top(value);
}

void MutableNUMASpace::clear(bool mangle_space) {
  MutableSpace::set_top(bottom());
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    // Never mangle NUMA spaces because the mangling will
    // bind the memory to a possibly unwanted lgroup.
    lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
  }
}
/*
  Linux supports static memory binding, so most of the logic dealing
  with possibly invalid page allocation is effectively disabled. Besides,
  there is no notion of a home node in Linux: a thread is allowed to
  migrate freely, although the scheduler is rather reluctant to move
  threads between nodes. We therefore check for the current node on every
  allocation. With high probability a thread stays on the same node for
  some time, allowing local access to recently allocated objects.
 */
0 | 753 HeapWord* MutableNUMASpace::allocate(size_t size) { |
141 | 754 Thread* thr = Thread::current(); |
755 int lgrp_id = thr->lgrp_id(); | |
756 if (lgrp_id == -1 || !os::numa_has_group_homing()) { | |
0 | 757 lgrp_id = os::numa_get_group_id(); |
141 | 758 thr->set_lgrp_id(lgrp_id); |
0 | 759 } |
760 | |
761 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); | |
762 | |
763 // It is possible that a new CPU has been hotplugged and | |
764 // we haven't reshaped the space accordingly. | |
765 if (i == -1) { | |
766 i = os::random() % lgrp_spaces()->length(); | |
767 } | |
768 | |
373
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
769 LGRPSpace* ls = lgrp_spaces()->at(i); |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
770 MutableSpace *s = ls->space(); |
0 | 771 HeapWord *p = s->allocate(size); |
772 | |
  if (p != NULL) {
    size_t remainder = s->free_in_words();
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      s->set_top(s->top() - size);
      p = NULL;
    }
  }
  if (p != NULL) {
    if (top() < s->top()) { // Keep _top updated.
      MutableSpace::set_top(s->top());
    }
  }
  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
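  // One store per page above suffices for first-touch placement; for
  // example, with a 4K page and 8-byte HeapWords the stride
  // os::vm_page_size() >> LogHeapWordSize is 512 words.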
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

// This version is lock-free.
HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  LGRPSpace *ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL) {
    size_t remainder = pointer_delta(s->end(), p + size);
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      if (s->cas_deallocate(p, size)) {
        // We were the last to allocate and created a fragment less than
        // a minimal object.
        p = NULL;
      } else {
        guarantee(false, "Deallocation should always succeed");
      }
    }
  }
  if (p != NULL) {
    HeapWord* cur_top, *cur_chunk_top = p + size;
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
        break;
      }
    }
  }

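  // Unlike the locked path, _top is published with a CAS retry loop:
  // concurrent allocators race to advance it, and a thread stops once
  // _top already covers its chunk top or its own cmpxchg succeeds.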
  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

void MutableNUMASpace::print_short_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print(" (");
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
    lgrp_spaces()->at(i)->space()->print_short_on(st);
    if (i < lgrp_spaces()->length() - 1) {
      st->print(", ");
    }
  }
  st->print(")");
}
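// Output shape produced above (illustrative, derived from the format
// strings): the base space summary followed by
// " (lgrp 0: <short summary>, lgrp 1: <short summary>)".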

void MutableNUMASpace::print_on(outputStream* st) const {
  MutableSpace::print_on(st);
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    st->print(" lgrp %d", ls->lgrp_id());
    ls->space()->print_on(st);
    if (NUMAStats) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        lgrp_spaces()->at(i)->accumulate_statistics(page_size());
      }
      st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
                ls->space_stats()->_local_space / K,
                ls->space_stats()->_remote_space / K,
                ls->space_stats()->_unbiased_space / K,
                ls->space_stats()->_uncommited_space / K,
                ls->space_stats()->_large_pages,
                ls->space_stats()->_small_pages);
    }
  }
}
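// The NUMAStats line reports, per lgrp: bytes resident on the chunk's
// own node (local), bytes on other nodes (remote), bytes in the
// page-unaligned head/tail that cannot be biased to a node (unbiased),
// uncommitted bytes, and large/small page counts.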

void MutableNUMASpace::verify() {
  // This can be called after setting an arbitrary value to the space's top,
  // so an object can cross the chunk boundary. We ensure the parsability
  // of the space and just walk the objects in linear fashion.
  ensure_parsability();
  MutableSpace::verify();
}

// Scan pages and gather stats about page placement and size.
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        return;
      }
    }
  }
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));
}
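// Note: if os::get_page_info() fails, accumulate_statistics() returns
// early and the gathered numbers cover only the range scanned so far.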

// Scan page_count pages and verify that they have the right size and placement.
// If invalid pages are found, they are freed in the hope that subsequent
// reallocation will be more successful.
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
{
  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);

  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);

  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      assert(e < scan_end, err_msg("e: " PTR_FORMAT " scan_end: " PTR_FORMAT, e, scan_end));

      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}
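// The scan window advances by page_count pages per call and resets to
// the start of the chunk once last_page_scanned() falls outside
// [range_start, range_end), so successive calls incrementally sweep
// the whole chunk.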