Mercurial > hg > graal-compiler
annotate src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @ 1842:6e0aac35bfa9
6980838: G1: guarantee(false) failed: thread has an unexpected active value in its SATB queue
Summary: Under certain circumstances a safepoint could happen between a JavaThread object being created and that object being added to the Java threads list. This could cause the active field of that thread's SATB queue to get out-of-sync with respect to the other Java threads. The solution is to activate the SATB queue, when necessary, before adding the thread to the Java threads list, not when the JavaThread object is created. The changeset also includes a small fix to rename the surrogate locker thread from "Surrogate Locker Thread (CMS)" to "Surrogate Locker Thread (Concurrent GC)" since it's also used in G1.
Reviewed-by: iveresov, ysr, johnc, jcoomes
author | tonyp |
---|---|
date | Fri, 01 Oct 2010 16:43:05 -0400 |
parents | e9ff18c4ace7 |
children | f95d63e2154a |
rev | line source |
---|---|
0 | 1 |
2 /* | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
628
diff
changeset
|
3 * Copyright (c) 2006, 2009, Oracle and/or its affiliates. All rights reserved. |
0 | 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
5 * | |
6 * This code is free software; you can redistribute it and/or modify it | |
7 * under the terms of the GNU General Public License version 2 only, as | |
8 * published by the Free Software Foundation. | |
9 * | |
10 * This code is distributed in the hope that it will be useful, but WITHOUT | |
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
13 * version 2 for more details (a copy is included in the LICENSE file that | |
14 * accompanied this code). | |
15 * | |
16 * You should have received a copy of the GNU General Public License version | |
17 * 2 along with this work; if not, write to the Free Software Foundation, | |
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
19 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
628
diff
changeset
|
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
628
diff
changeset
|
21 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
628
diff
changeset
|
22 * questions. |
0 | 23 * |
24 */ | |
25 | |
26 # include "incls/_precompiled.incl" | |
27 # include "incls/_mutableNUMASpace.cpp.incl" | |
28 | |
29 | |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
30 MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) { |
0 | 31 _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true); |
32 _page_size = os::vm_page_size(); | |
33 _adaptation_cycles = 0; | |
34 _samples_count = 0; | |
35 update_layout(true); | |
36 } | |
37 | |
38 MutableNUMASpace::~MutableNUMASpace() { | |
39 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
40 delete lgrp_spaces()->at(i); | |
41 } | |
42 delete lgrp_spaces(); | |
43 } | |
44 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
45 #ifndef PRODUCT |
0 | 46 void MutableNUMASpace::mangle_unused_area() { |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
47 // This method should do nothing. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
48 // It can be called on a numa space during a full compaction. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
49 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
50 void MutableNUMASpace::mangle_unused_area_complete() { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
51 // This method should do nothing. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
52 // It can be called on a numa space during a full compaction. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
53 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
54 void MutableNUMASpace::mangle_region(MemRegion mr) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
55 // This method should do nothing because numa spaces are not mangled. |
0 | 56 } |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
57 void MutableNUMASpace::set_top_for_allocations(HeapWord* v) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
58 assert(false, "Do not mangle MutableNUMASpace's"); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
59 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
60 void MutableNUMASpace::set_top_for_allocations() { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
61 // This method should do nothing. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
62 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
63 void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
64 // This method should do nothing. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
65 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
66 void MutableNUMASpace::check_mangled_unused_area_complete() { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
67 // This method should do nothing. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
68 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
69 #endif // NOT_PRODUCT |
0 | 70 |
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        // Plug the unused tail of this chunk with a filler object so the
        // space stays iterable.
        size_t area_touched_words = pointer_delta(s->end(), s->top());
        CollectedHeap::fill_with_object(s->top(), area_touched_words);
#ifndef ASSERT
        // In product builds without zapping, only the filler's header was
        // actually written; shrink the "touched" extent accordingly.
        if (!ZapUnusedHeapArea) {
          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                    area_touched_words);
        }
#endif
        if (!os::numa_has_static_binding()) {
          // Writing the filler may have faulted pages in on the wrong node;
          // record the page-aligned area as invalid so it can be freed and
          // re-biased later.
          MemRegion invalid;
          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
                                                       os::vm_page_size());
          if (crossing_start != crossing_end) {
            // If object header crossed a small page boundary we mark the area
            // as invalid rounding it to a page_size().
            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
                                 s->end());
            invalid = MemRegion(start, end);
          }

          ls->add_invalid_region(invalid);
        }
      }
    } else {
      // This space is at or beyond the one containing top().
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          // No zapping: nothing was touched, stop scanning.
          return;
        }
#endif
      } else {
        // Static binding: pages cannot migrate, stop scanning.
        return;
      }
    }
  }
}
123 | |
124 size_t MutableNUMASpace::used_in_words() const { | |
125 size_t s = 0; | |
126 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
127 s += lgrp_spaces()->at(i)->space()->used_in_words(); | |
128 } | |
129 return s; | |
130 } | |
131 | |
132 size_t MutableNUMASpace::free_in_words() const { | |
133 size_t s = 0; | |
134 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
135 s += lgrp_spaces()->at(i)->space()->free_in_words(); | |
136 } | |
137 return s; | |
138 } | |
139 | |
140 | |
// TLAB capacity attributable to the given thread: the capacity of the chunk
// belonging to the thread's locality group, or an even share if the group
// is not yet known.
size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // This case can occur after the topology of the system has
    // changed. Threads can change their location; the new home
    // group will be determined during the first allocation
    // attempt. For now we can safely assume that all spaces
    // have equal size because the whole space will be reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  // That's the normal case, where we know the locality group of the thread.
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    // The thread's group has no chunk (e.g. after a topology change).
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}
164 | |
// Largest TLAB this thread could allocate: the free space of the chunk
// belonging to the thread's locality group.
size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // Home group not yet known (e.g. after a topology change): assume an
    // even split until the space is reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return free_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}
183 | |
373
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
184 |
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
// Capacity in words of the chunk belonging to the given thread's locality
// group; see tlab_capacity() for the lgrp_id == -1 fallback rationale.
size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // Home group not yet known: assume an even split.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_words() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_words();
}
06df86c2ec37
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
iveresov
parents:
269
diff
changeset
|
202 |
// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
// Returns true iff the set of lgrp spaces was (re)computed.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology has changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        // Only advance when nothing was removed at this index.
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

    if (changed) {
      // Invalidate cached lgrp ids; each thread rediscovers its home
      // group on its next allocation attempt.
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}
256 | |
// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
  // Shrink to the largest page_size()-aligned region inside mr.
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start() % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    // First we tell the OS which page size we want in the given range. The underlying
    // large page can be broken down if we require small pages.
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // Then we uncommit the pages in the range.
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
    // And make them local/first-touch biased.
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
  }
}
275 | |
276 // Free all pages in the region. | |
277 void MutableNUMASpace::free_region(MemRegion mr) { | |
278 HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size()); | |
279 HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size()); | |
280 if (end > start) { | |
281 MemRegion aligned_region(start, end); | |
282 assert((intptr_t)aligned_region.start() % page_size() == 0 && | |
283 (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment"); | |
284 assert(region().contains(aligned_region), "Sanity"); | |
285 os::free_memory((char*)aligned_region.start(), aligned_region.byte_size()); | |
286 } | |
287 } | |
288 | |
// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized.
    // And clear the alloc-rate statistics.
    // In future we may want to handle this more gracefully in order
    // to avoid the reallocation of the pages as much as possible.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      MutableSpace *s = ls->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
      ls->clear_alloc_rate();
    }
    // A NUMA space is never mangled
    initialize(region(),
               SpaceDecorator::Clear,
               SpaceDecorator::DontMangle);
  } else {
    bool should_initialize = false;
    if (!os::numa_has_static_binding()) {
      // Reinitialize if any chunk recorded pages touched on the wrong
      // node (invalid regions) since the last update.
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
          should_initialize = true;
          break;
        }
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      // A NUMA space is never mangled
      initialize(region(),
                 SpaceDecorator::Clear,
                 SpaceDecorator::DontMangle);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}
335 | |
336 // Scan pages. Free pages that have smaller size or wrong placement. | |
337 void MutableNUMASpace::scan_pages(size_t page_count) | |
338 { | |
339 size_t pages_per_chunk = page_count / lgrp_spaces()->length(); | |
340 if (pages_per_chunk > 0) { | |
341 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
342 LGRPSpace *ls = lgrp_spaces()->at(i); | |
343 ls->scan_pages(page_size(), pages_per_chunk); | |
344 } | |
345 } | |
346 } | |
347 | |
348 // Accumulate statistics about the allocation rate of each lgrp. | |
349 void MutableNUMASpace::accumulate_statistics() { | |
350 if (UseAdaptiveNUMAChunkSizing) { | |
351 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
352 lgrp_spaces()->at(i)->sample(); | |
353 } | |
354 increment_samples_count(); | |
355 } | |
356 | |
357 if (NUMAStats) { | |
358 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
359 lgrp_spaces()->at(i)->accumulate_statistics(page_size()); | |
360 } | |
361 } | |
362 } | |
363 | |
364 // Get the current size of a chunk. | |
365 // This function computes the size of the chunk based on the | |
366 // difference between chunk ends. This allows it to work correctly in | |
367 // case the whole space is resized and during the process of adaptive | |
368 // chunk resizing. | |
369 size_t MutableNUMASpace::current_chunk_size(int i) { | |
370 HeapWord *cur_end, *prev_end; | |
371 if (i == 0) { | |
372 prev_end = bottom(); | |
373 } else { | |
374 prev_end = lgrp_spaces()->at(i - 1)->space()->end(); | |
375 } | |
376 if (i == lgrp_spaces()->length() - 1) { | |
377 cur_end = end(); | |
378 } else { | |
379 cur_end = lgrp_spaces()->at(i)->space()->end(); | |
380 } | |
381 if (cur_end > prev_end) { | |
382 return pointer_delta(cur_end, prev_end, sizeof(char)); | |
383 } | |
384 return 0; | |
385 } | |
386 | |
387 // Return the default chunk size by equally diving the space. | |
388 // page_size() aligned. | |
389 size_t MutableNUMASpace::default_chunk_size() { | |
390 return base_space_size() / lgrp_spaces()->length() * page_size(); | |
391 } | |
392 | |
// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on sequence of i's from 0 to
// lgrp_spaces()->length().
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  // Pages not yet claimed by chunks 0..i-1, reserving at least one page
  // for every chunk after i.
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  // Sum the allocation rates of this chunk and all the following ones;
  // chunk i gets a share of the remaining pages proportional to its rate.
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  // If a limit is given, cap the growth/shrink of the chunk to at most
  // `limit` bytes per call.
  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      size_t upper_bound = pages_available * page_size();
      if (upper_bound > limit &&
          current_chunk_size(i) < upper_bound - limit) {
        // The resulting upper bound should not exceed the available
        // amount of memory (pages_available * page_size()).
        upper_bound = current_chunk_size(i) + limit;
      }
      chunk_size = MIN2(chunk_size, upper_bound);
    } else {
      size_t lower_bound = page_size();
      if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
        lower_bound = current_chunk_size(i) - limit;
      }
      chunk_size = MAX2(chunk_size, lower_bound);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}
436 | |
437 | |
// Compute the bottom_region and the top_region — the parts of new_region that
// lie outside the (page-aligned) intersection with the old region. Pages in
// the intersection are already placed correctly; only the tails need to be
// freed/re-biased by the caller. Align the tails to the page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                    MemRegion* bottom_region, MemRegion *top_region) {
  // Is there bottom?
  if (new_region.start() < intersection.start()) { // Yes
    // Try to coalesce small pages into a large one: round the intersection
    // start up to a large-page (alignment()) boundary so the bottom tail
    // covers whole large pages.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
      // Only shrink the intersection if the rounded point is still inside
      // new_region and leaves at least one full large page in the bottom tail.
      if (new_region.contains(p)
          && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(p, intersection.end());
        } else {
          // Rounding moved past the intersection end — intersection is empty.
          intersection = MemRegion(p, p);
        }
      }
    }
    *bottom_region = MemRegion(new_region.start(), intersection.start());
  } else {
    *bottom_region = MemRegion();
  }

  // Is there top?
  if (intersection.end() < new_region.end()) { // Yes
    // Try to coalesce small pages into a large one: round the intersection
    // end down to a large-page boundary so the top tail covers whole pages.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
      if (new_region.contains(p)
          && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(intersection.start(), p);
        } else {
          // Rounding moved below the intersection start — intersection is empty.
          intersection = MemRegion(p, p);
        }
      }
    }
    *top_region = MemRegion(intersection.end(), new_region.end());
  } else {
    *top_region = MemRegion();
  }
}
481 | |
// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. Return the invalid_region aligned to the page_size()
// boundary if it's inside the intersection. Return non-empty invalid_region
// if it lies inside the intersection (also page-aligned).
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
  // Case 1: invalid region overlaps the front of the intersection —
  // fold it into the bottom tail by moving the intersection start up.
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else
  // Case 2: invalid region overlaps the back of the intersection —
  // fold it into the top tail by moving the intersection end down.
  if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
    *intersection = MemRegion(intersection->start(), invalid_region->start());
    *invalid_region = MemRegion();
  } else
  // Case 3: invalid region covers the whole intersection — drop the
  // intersection entirely (both tails will be reallocated).
  if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
    *intersection = MemRegion(new_region.start(), new_region.start());
    *invalid_region = MemRegion();
  } else
  // Case 4: invalid region lies strictly inside the intersection.
  if (intersection->contains(invalid_region)) {
    // That's the only case we have to make an additional bias_region() call.
    HeapWord* start = invalid_region->start();
    HeapWord* end = invalid_region->end();
    // Expand the invalid range outward to large-page boundaries so whole
    // pages get re-biased.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
      if (new_region.contains(p)) {
        start = p;
      }
      p = (HeapWord*)round_to((intptr_t) end, alignment());
      // NOTE(review): this checks contains(end) rather than contains(p);
      // since round_to rounds up, p may lie past new_region even when end
      // is inside it — confirm the asymmetry with the start case is intended.
      if (new_region.contains(end)) {
        end = p;
      }
    }
    // Grow the intersection so it fully covers the (expanded) invalid range.
    if (intersection->start() > start) {
      *intersection = MemRegion(start, intersection->end());
    }
    if (intersection->end() < end) {
      *intersection = MemRegion(intersection->start(), end);
    }
    *invalid_region = MemRegion(start, end);
  }
}
526 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
527 void MutableNUMASpace::initialize(MemRegion mr, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
528 bool clear_space, |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
529 bool mangle_space, |
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
530 bool setup_pages) { |
0 | 531 assert(clear_space, "Reallocation will destory data!"); |
532 assert(lgrp_spaces()->length() > 0, "There should be at least one space"); | |
533 | |
534 MemRegion old_region = region(), new_region; | |
535 set_bottom(mr.start()); | |
536 set_end(mr.end()); | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
537 // Must always clear the space |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
538 clear(SpaceDecorator::DontMangle); |
0 | 539 |
540 // Compute chunk sizes | |
541 size_t prev_page_size = page_size(); | |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
542 set_page_size(UseLargePages ? alignment() : os::vm_page_size()); |
0 | 543 HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size()); |
544 HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size()); | |
545 size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size(); | |
546 | |
547 // Try small pages if the chunk size is too small | |
548 if (base_space_size_pages / lgrp_spaces()->length() == 0 | |
549 && page_size() > (size_t)os::vm_page_size()) { | |
550 set_page_size(os::vm_page_size()); | |
551 rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size()); | |
552 rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size()); | |
553 base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size(); | |
554 } | |
555 guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small"); | |
556 set_base_space_size(base_space_size_pages); | |
557 | |
558 // Handle space resize | |
559 MemRegion top_region, bottom_region; | |
560 if (!old_region.equals(region())) { | |
561 new_region = MemRegion(rounded_bottom, rounded_end); | |
562 MemRegion intersection = new_region.intersection(old_region); | |
563 if (intersection.start() == NULL || | |
564 intersection.end() == NULL || | |
565 prev_page_size > page_size()) { // If the page size got smaller we have to change | |
566 // the page size preference for the whole space. | |
567 intersection = MemRegion(new_region.start(), new_region.start()); | |
568 } | |
569 select_tails(new_region, intersection, &bottom_region, &top_region); | |
141 | 570 bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id()); |
571 bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id()); | |
0 | 572 } |
573 | |
574 // Check if the space layout has changed significantly? | |
575 // This happens when the space has been resized so that either head or tail | |
576 // chunk became less than a page. | |
577 bool layout_valid = UseAdaptiveNUMAChunkSizing && | |
578 current_chunk_size(0) > page_size() && | |
579 current_chunk_size(lgrp_spaces()->length() - 1) > page_size(); | |
580 | |
581 | |
582 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
583 LGRPSpace *ls = lgrp_spaces()->at(i); | |
584 MutableSpace *s = ls->space(); | |
585 old_region = s->region(); | |
586 | |
587 size_t chunk_byte_size = 0, old_chunk_byte_size = 0; | |
588 if (i < lgrp_spaces()->length() - 1) { | |
589 if (!UseAdaptiveNUMAChunkSizing || | |
590 (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) || | |
591 samples_count() < AdaptiveSizePolicyReadyThreshold) { | |
592 // No adaptation. Divide the space equally. | |
593 chunk_byte_size = default_chunk_size(); | |
594 } else | |
595 if (!layout_valid || NUMASpaceResizeRate == 0) { | |
596 // Fast adaptation. If no space resize rate is set, resize | |
597 // the chunks instantly. | |
598 chunk_byte_size = adaptive_chunk_size(i, 0); | |
599 } else { | |
600 // Slow adaptation. Resize the chunks moving no more than | |
601 // NUMASpaceResizeRate bytes per collection. | |
602 size_t limit = NUMASpaceResizeRate / | |
603 (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2); | |
604 chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size())); | |
605 } | |
606 | |
607 assert(chunk_byte_size >= page_size(), "Chunk size too small"); | |
608 assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check"); | |
609 } | |
610 | |
611 if (i == 0) { // Bottom chunk | |
612 if (i != lgrp_spaces()->length() - 1) { | |
613 new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize)); | |
614 } else { | |
615 new_region = MemRegion(bottom(), end()); | |
616 } | |
617 } else | |
618 if (i < lgrp_spaces()->length() - 1) { // Middle chunks | |
619 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space(); | |
620 new_region = MemRegion(ps->end(), | |
621 ps->end() + (chunk_byte_size >> LogHeapWordSize)); | |
622 } else { // Top chunk | |
623 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space(); | |
624 new_region = MemRegion(ps->end(), end()); | |
625 } | |
626 guarantee(region().contains(new_region), "Region invariant"); | |
627 | |
628 | |
629 // The general case: | |
630 // |---------------------|--invalid---|--------------------------| | |
631 // |------------------new_region---------------------------------| | |
632 // |----bottom_region--|---intersection---|------top_region------| | |
633 // |----old_region----| | |
634 // The intersection part has all pages in place we don't need to migrate them. | |
635 // Pages for the top and bottom part should be freed and then reallocated. | |
636 | |
637 MemRegion intersection = old_region.intersection(new_region); | |
638 | |
639 if (intersection.start() == NULL || intersection.end() == NULL) { | |
640 intersection = MemRegion(new_region.start(), new_region.start()); | |
641 } | |
642 | |
141 | 643 if (!os::numa_has_static_binding()) { |
644 MemRegion invalid_region = ls->invalid_region().intersection(new_region); | |
645 // Invalid region is a range of memory that could've possibly | |
646 // been allocated on the other node. That's relevant only on Solaris where | |
647 // there is no static memory binding. | |
648 if (!invalid_region.is_empty()) { | |
649 merge_regions(new_region, &intersection, &invalid_region); | |
650 free_region(invalid_region); | |
651 ls->set_invalid_region(MemRegion()); | |
652 } | |
0 | 653 } |
141 | 654 |
0 | 655 select_tails(new_region, intersection, &bottom_region, &top_region); |
141 | 656 |
657 if (!os::numa_has_static_binding()) { | |
658 // If that's a system with the first-touch policy then it's enough | |
659 // to free the pages. | |
660 free_region(bottom_region); | |
661 free_region(top_region); | |
662 } else { | |
663 // In a system with static binding we have to change the bias whenever | |
664 // we reshape the heap. | |
665 bias_region(bottom_region, ls->lgrp_id()); | |
666 bias_region(top_region, ls->lgrp_id()); | |
667 } | |
0 | 668 |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
669 // Clear space (set top = bottom) but never mangle. |
535
4e400c36026f
6783381: NUMA allocator: don't pretouch eden space with UseNUMA
iveresov
parents:
481
diff
changeset
|
670 s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages); |
0 | 671 |
672 set_adaptation_cycles(samples_count()); | |
673 } | |
674 } | |
675 | |
// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  // Note: i is only advanced at the bottom of the loop so a filler-object
  // insertion can restart the current iteration with the moved value.
  for (int i = 0; i < lgrp_spaces()->length();) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    // First page-aligned address at or above the chunk's current top.
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());

    if (s->contains(value)) {
      // Check if setting the chunk's top to a given value would create a hole less than
      // a minimal object; assuming that's not the last chunk in which case we don't care.
      if (i < lgrp_spaces()->length() - 1) {
        size_t remainder = pointer_delta(s->end(), value);
        const size_t min_fill_size = CollectedHeap::min_fill_size();
        if (remainder < min_fill_size && remainder > 0) {
          // Add a minimum size filler object; it will cross the chunk boundary.
          CollectedHeap::fill_with_object(value, min_fill_size);
          value += min_fill_size;
          assert(!s->contains(value), "Should be in the next chunk");
          // Restart the loop from the same chunk, since the value has moved
          // to the next one.
          continue;
        }
      }

      // Without static binding, the pages between the old and new top may
      // have been touched on another node; record them as invalid.
      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        // Chunks above the one holding value become empty.
        s->set_top(s->bottom());
      } else {
        // Chunks below the one holding value are completely used.
        if (!os::numa_has_static_binding() && top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
    i++;
  }
  MutableSpace::set_top(value);
}
721 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
722 void MutableNUMASpace::clear(bool mangle_space) { |
0 | 723 MutableSpace::set_top(bottom()); |
724 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
725 // Never mangle NUMA spaces because the mangling will |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
726 // bind the memory to a possibly unwanted lgroup. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
190
diff
changeset
|
727 lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle); |
0 | 728 } |
729 } | |
730 | |
/*
  Linux supports static memory binding, therefore most of the logic
  dealing with a possible invalid page allocation is effectively
  disabled. Besides, there is no notion of a home node in Linux: a
  thread is allowed to migrate freely, although the scheduler is rather
  reluctant to move threads between nodes. We check for the current
  node on every allocation, and with high probability a thread stays on
  the same node for some time, allowing local access to recently
  allocated objects.
*/
741 | |
// Allocate size words from the chunk belonging to the current thread's
// locality group; returns NULL on failure. This is the lock-protected path
// (see cas_allocate for the lock-free variant).
HeapWord* MutableNUMASpace::allocate(size_t size) {
  // Cache the thread's lgroup id; (re)query the OS when it is unknown or
  // the platform does not home threads to a group.
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);

  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }

  LGRPSpace* ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->allocate(size);

  if (p != NULL) {
    // Undo the allocation if it would leave a tail fragment in the chunk
    // that is too small to hold a filler object.
    size_t remainder = s->free_in_words();
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      s->set_top(s->top() - size);
      p = NULL;
    }
  }
  if (p != NULL) {
    if (top() < s->top()) { // Keep _top updated.
      MutableSpace::set_top(s->top());
    }
  }
  // Make the page allocation happen here if there is no static binding.
  // Touching one word per page places the page on the current node under
  // a first-touch policy.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    // Record the failure so the adaptive chunk resizing can react.
    ls->set_allocation_failed();
  }
  return p;
}
785 | |
// This version is lock-free: the chunk-local bump allocation, the rollback
// of too-small tail fragments, and the update of the space-wide _top are
// all done with compare-and-swap.
HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  // Cache the thread's lgroup id; (re)query the OS when it is unknown or
  // the platform does not home threads to a group.
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  LGRPSpace *ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL) {
    size_t remainder = pointer_delta(s->end(), p + size);
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      // cas_deallocate only succeeds if no other thread allocated after us.
      if (s->cas_deallocate(p, size)) {
        // We were the last to allocate and created a fragment less than
        // a minimal object.
        p = NULL;
      } else {
        guarantee(false, "Deallocation should always succeed");
      }
    }
  }
  if (p != NULL) {
    // CAS-loop to keep the space-wide _top at or above the chunk top we
    // just advanced; retry only while our top is still ahead.
    HeapWord* cur_top, *cur_chunk_top = p + size;
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
        break;
      }
    }
  }

  // Make the page allocation happen here if there is no static binding.
  // Touching one word per page places the page on the current node under
  // a first-touch policy.
  if (p != NULL && !os::numa_has_static_binding() ) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    // Record the failure so the adaptive chunk resizing can react.
    ls->set_allocation_failed();
  }
  return p;
}
836 | |
837 void MutableNUMASpace::print_short_on(outputStream* st) const { | |
838 MutableSpace::print_short_on(st); | |
839 st->print(" ("); | |
840 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
841 st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id()); | |
842 lgrp_spaces()->at(i)->space()->print_short_on(st); | |
843 if (i < lgrp_spaces()->length() - 1) { | |
844 st->print(", "); | |
845 } | |
846 } | |
847 st->print(")"); | |
848 } | |
849 | |
850 void MutableNUMASpace::print_on(outputStream* st) const { | |
851 MutableSpace::print_on(st); | |
852 for (int i = 0; i < lgrp_spaces()->length(); i++) { | |
853 LGRPSpace *ls = lgrp_spaces()->at(i); | |
854 st->print(" lgrp %d", ls->lgrp_id()); | |
855 ls->space()->print_on(st); | |
856 if (NUMAStats) { | |
144
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
857 for (int i = 0; i < lgrp_spaces()->length(); i++) { |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
858 lgrp_spaces()->at(i)->accumulate_statistics(page_size()); |
e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
iveresov
parents:
141
diff
changeset
|
859 } |
0 | 860 st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n", |
861 ls->space_stats()->_local_space / K, | |
862 ls->space_stats()->_remote_space / K, | |
863 ls->space_stats()->_unbiased_space / K, | |
864 ls->space_stats()->_uncommited_space / K, | |
865 ls->space_stats()->_large_pages, | |
866 ls->space_stats()->_small_pages); | |
867 } | |
868 } | |
869 } | |
870 | |
190
d1635bf93939
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
iveresov
parents:
144
diff
changeset
|
void MutableNUMASpace::verify(bool allow_dirty) {
  // This can be called after setting an arbitrary value to the space's top,
  // so an object can cross the chunk boundary. We ensure the parsability
  // of the space and just walk the objects in linear fashion.
  ensure_parsability();
  MutableSpace::verify(allow_dirty);
}
878 | |
// Scan pages and gather stats about page placement and size.
// Walks the page-aligned interior of the chunk, querying the OS per page;
// the unaligned head/tail of the chunk is accounted as unbiased space.
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          // Classify by page size...
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          // ...and by placement relative to this chunk's lgroup.
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          // Page is not committed; advance by the small page size.
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        // Query failed — give up; stats are left partially accumulated.
        return;
      }
    }
  }
  // Head and tail fragments that don't cover a whole page are unbiased.
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));

}
913 | |
// Scan page_count pages and verify if they have the right size and right placement.
// If invalid pages are found they are freed in hope that subsequent reallocation
// will be more successful. The scan is incremental: it resumes from
// last_page_scanned() and advances it, so successive calls cover the chunk
// a window at a time.
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
{
  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);

  // Reset the cursor if the chunk has moved/shrunk since the last scan.
  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);

  // Expected attributes for every page of this chunk.
  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    // Returns the end of the run of pages matching page_expected; fills
    // page_found with the attributes of the first mismatching page.
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      // The run [s, e) had unexpected size or placement (and was committed,
      // size != 0) — free it so reallocation can place it correctly.
      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)));
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}