annotate src/share/vm/runtime/virtualspace.cpp @ 325:93befa083681
6741004: UseLargePages + UseCompressedOops breaks implicit null checking guard page
Summary: Turn off C2 implicit null checking on Windows when large pages are specified.
Reviewed-by: jrose, xlu
author | coleenp |
date | Tue, 02 Sep 2008 15:18:26 -0400 |
parents | 1fdb98a17101 |
children | 9ee9cf798b59 032ddb9432ad |
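For orientation, here is a minimal standalone sketch (not HotSpot code) of the guard-page sizing this changeset gates: revision 325 only requests a no-access prefix in front of the compressed-oops heap when both UseCompressedOops and the new UseImplicitNullCheckForNarrowOop flag are on (see source lines 383-384 and 395-396 in the listing below). The names `compute_noaccess_prefix` and `vm_page_size` are illustrative stand-ins, not the VM's own APIs.

```cpp
#include <cstddef>
#include <cstdio>
#include <numeric>  // std::lcm (C++17); HotSpot uses its own lcm() helper

// Illustrative stand-ins for the VM flags and page size used in the listing.
static bool   UseCompressedOops                = true;
static bool   UseImplicitNullCheckForNarrowOop = true;  // guard added by rev 325
static size_t vm_page_size()                   { return 4096; }

// Sketch of the conditional at lines 383-384 / 395-396: size of the
// protected (no-access) prefix reserved in front of the heap so that a
// dereference through a decoded narrow-oop NULL faults, letting C2 keep
// using implicit null checks.
static size_t compute_noaccess_prefix(size_t heap_alignment) {
  if (UseCompressedOops && UseImplicitNullCheckForNarrowOop) {
    // The prefix must satisfy both the page size and the heap alignment,
    // hence the least common multiple of the two.
    return std::lcm(vm_page_size(), heap_alignment);
  }
  // Flag off (e.g. Windows with large pages, per the summary above):
  // no prefix is reserved and implicit null checks for narrow oops are off.
  return 0;
}

int main() {
  // With a 4 KiB page and a 1 MiB heap alignment the prefix is 1 MiB.
  std::printf("noaccess prefix: %zu bytes\n", compute_noaccess_prefix(1u << 20));
  return 0;
}
```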
rev | line source |
---|---|
0 | 1 /* |
2 * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_virtualspace.cpp.incl" | |
27 | |
28 | |
29 // ReservedSpace | |
30 ReservedSpace::ReservedSpace(size_t size) { | |
237 | 31 initialize(size, 0, false, NULL, 0); |
0 | 32 } |
33 | |
34 ReservedSpace::ReservedSpace(size_t size, size_t alignment, | |
237 | 35 bool large, |
36 char* requested_address, |
37 const size_t noaccess_prefix) { |
38 initialize(size+noaccess_prefix, alignment, large, requested_address, |
39 noaccess_prefix); |
0 | 40 } |
41 | |
42 char * | |
43 ReservedSpace::align_reserved_region(char* addr, const size_t len, | |
44 const size_t prefix_size, | |
45 const size_t prefix_align, | |
46 const size_t suffix_size, | |
47 const size_t suffix_align) | |
48 { | |
49 assert(addr != NULL, "sanity"); | |
50 const size_t required_size = prefix_size + suffix_size; | |
51 assert(len >= required_size, "len too small"); | |
52 | |
53 const size_t s = size_t(addr); | |
54 const size_t beg_ofs = s + prefix_size & suffix_align - 1; | |
55 const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs; | |
56 | |
57 if (len < beg_delta + required_size) { | |
58 return NULL; // Cannot do proper alignment. | |
59 } | |
60 const size_t end_delta = len - (beg_delta + required_size); | |
61 | |
62 if (beg_delta != 0) { | |
63 os::release_memory(addr, beg_delta); | |
64 } | |
65 | |
66 if (end_delta != 0) { | |
67 char* release_addr = (char*) (s + beg_delta + required_size); | |
68 os::release_memory(release_addr, end_delta); | |
69 } | |
70 | |
71 return (char*) (s + beg_delta); | |
72 } | |
73 | |
74 char* ReservedSpace::reserve_and_align(const size_t reserve_size, | |
75 const size_t prefix_size, | |
76 const size_t prefix_align, | |
77 const size_t suffix_size, | |
78 const size_t suffix_align) | |
79 { | |
80 assert(reserve_size > prefix_size + suffix_size, "should not be here"); | |
81 | |
82 char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align); | |
83 if (raw_addr == NULL) return NULL; | |
84 | |
85 char* result = align_reserved_region(raw_addr, reserve_size, prefix_size, | |
86 prefix_align, suffix_size, | |
87 suffix_align); | |
88 if (result == NULL && !os::release_memory(raw_addr, reserve_size)) { | |
89 fatal("os::release_memory failed"); | |
90 } | |
91 | |
92 #ifdef ASSERT | |
93 if (result != NULL) { | |
94 const size_t raw = size_t(raw_addr); | |
95 const size_t res = size_t(result); | |
96 assert(res >= raw, "alignment decreased start addr"); | |
97 assert(res + prefix_size + suffix_size <= raw + reserve_size, | |
98 "alignment increased end addr"); | |
99 assert((res & prefix_align - 1) == 0, "bad alignment of prefix"); | |
100 assert((res + prefix_size & suffix_align - 1) == 0, | |
101 "bad alignment of suffix"); | |
102 } | |
103 #endif | |
104 | |
105 return result; | |
106 } | |
107 | |
108 ReservedSpace::ReservedSpace(const size_t prefix_size, | |
109 const size_t prefix_align, | |
110 const size_t suffix_size, | |
237 | 111 const size_t suffix_align, |
112 const size_t noaccess_prefix) |
0 | 113 { |
114 assert(prefix_size != 0, "sanity"); | |
115 assert(prefix_align != 0, "sanity"); | |
116 assert(suffix_size != 0, "sanity"); | |
117 assert(suffix_align != 0, "sanity"); | |
118 assert((prefix_size & prefix_align - 1) == 0, | |
119 "prefix_size not divisible by prefix_align"); | |
120 assert((suffix_size & suffix_align - 1) == 0, | |
121 "suffix_size not divisible by suffix_align"); | |
122 assert((suffix_align & prefix_align - 1) == 0, | |
123 "suffix_align not divisible by prefix_align"); | |
124 | |
237 | 125 // Add in noaccess_prefix to prefix_size; |
126 const size_t adjusted_prefix_size = prefix_size + noaccess_prefix; |
127 const size_t size = adjusted_prefix_size + suffix_size; |
128 |
0 | 129 // On systems where the entire region has to be reserved and committed up |
130 // front, the compound alignment normally done by this method is unnecessary. | |
131 const bool try_reserve_special = UseLargePages && | |
132 prefix_align == os::large_page_size(); | |
133 if (!os::can_commit_large_page_memory() && try_reserve_special) { | |
237 | 134 initialize(size, prefix_align, true, NULL, noaccess_prefix); |
0 | 135 return; |
136 } | |
137 | |
138 _base = NULL; | |
139 _size = 0; | |
140 _alignment = 0; | |
141 _special = false; | |
237 | 142 _noaccess_prefix = 0; |
143 |
144 // Assert that if noaccess_prefix is used, it is the same as prefix_align. |
145 assert(noaccess_prefix == 0 || |
146 noaccess_prefix == prefix_align, "noaccess prefix wrong"); |
0 | 147 |
148 // Optimistically try to reserve the exact size needed. | |
149 char* addr = os::reserve_memory(size, NULL, prefix_align); | |
150 if (addr == NULL) return; | |
151 | |
152 // Check whether the result has the needed alignment (unlikely unless | |
153 // prefix_align == suffix_align). | |
237 | 154 const size_t ofs = size_t(addr) + adjusted_prefix_size & suffix_align - 1; |
0 | 155 if (ofs != 0) { |
156 // Wrong alignment. Release, allocate more space and do manual alignment. | |
157 // | |
158 // On most operating systems, another allocation with a somewhat larger size | |
159 // will return an address "close to" that of the previous allocation. The | |
160 // result is often the same address (if the kernel hands out virtual | |
161 // addresses from low to high), or an address that is offset by the increase | |
162 // in size. Exploit that to minimize the amount of extra space requested. | |
163 if (!os::release_memory(addr, size)) { | |
164 fatal("os::release_memory failed"); | |
165 } | |
166 | |
167 const size_t extra = MAX2(ofs, suffix_align - ofs); | |
237 | 168 addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align, |
0 | 169 suffix_size, suffix_align); |
170 if (addr == NULL) { | |
171 // Try an even larger region. If this fails, address space is exhausted. | |
237 | 172 addr = reserve_and_align(size + suffix_align, adjusted_prefix_size, |
0 | 173 prefix_align, suffix_size, suffix_align); |
174 } | |
175 } | |
176 | |
177 _base = addr; | |
178 _size = size; | |
179 _alignment = prefix_align; | |
237 | 180 _noaccess_prefix = noaccess_prefix; |
0 | 181 } |
182 | |
183 void ReservedSpace::initialize(size_t size, size_t alignment, bool large, | |
237 | 184 char* requested_address, |
185 const size_t noaccess_prefix) { |
0 | 186 const size_t granularity = os::vm_allocation_granularity(); |
187 assert((size & granularity - 1) == 0, | |
188 "size not aligned to os::vm_allocation_granularity()"); | |
189 assert((alignment & granularity - 1) == 0, | |
190 "alignment not aligned to os::vm_allocation_granularity()"); | |
191 assert(alignment == 0 || is_power_of_2((intptr_t)alignment), | |
192 "not a power of 2"); | |
193 | |
194 _base = NULL; | |
195 _size = 0; | |
196 _special = false; | |
197 _alignment = 0; | |
237 | 198 _noaccess_prefix = 0; |
0 | 199 if (size == 0) { |
200 return; | |
201 } | |
202 | |
203 // If OS doesn't support demand paging for large page memory, we need | |
204 // to use reserve_memory_special() to reserve and pin the entire region. | |
205 bool special = large && !os::can_commit_large_page_memory(); | |
206 char* base = NULL; | |
207 | |
208 if (special) { | |
209 // It's not hard to implement reserve_memory_special() such that it can | |
210 // allocate at fixed address, but there seems no use of this feature | |
211 // for now, so it's not implemented. | |
212 assert(requested_address == NULL, "not implemented"); | |
213 | |
214 base = os::reserve_memory_special(size); | |
215 | |
216 if (base != NULL) { | |
217 // Check alignment constraints | |
218 if (alignment > 0) { | |
219 assert((uintptr_t) base % alignment == 0, | |
220 "Large pages returned a non-aligned address"); | |
221 } | |
222 _special = true; | |
223 } else { | |
224 // failed; try to reserve regular memory below | |
225 } | |
226 } | |
227 | |
228 if (base == NULL) { | |
229 // Optimistically assume that the OS returns an aligned base pointer. | |
230 // When reserving a large address range, most OSes seem to align to at | |
231 // least 64K. | |
232 | |
233 // If the memory was requested at a particular address, use | |
234 // os::attempt_reserve_memory_at() to avoid over mapping something | |
235 // important. If available space is not detected, return NULL. | |
236 | |
237 if (requested_address != 0) { | |
237 | 238 base = os::attempt_reserve_memory_at(size, |
239 requested_address-noaccess_prefix); |
0 | 240 } else { |
241 base = os::reserve_memory(size, NULL, alignment); | |
242 } | |
243 | |
244 if (base == NULL) return; | |
245 | |
246 // Check alignment constraints | |
247 if (alignment > 0 && ((size_t)base & alignment - 1) != 0) { | |
248 // Base not aligned, retry | |
249 if (!os::release_memory(base, size)) fatal("os::release_memory failed"); | |
250 // Reserve size large enough to do manual alignment and | |
251 // increase size to a multiple of the desired alignment | |
252 size = align_size_up(size, alignment); | |
253 size_t extra_size = size + alignment; | |
254 char* extra_base = os::reserve_memory(extra_size, NULL, alignment); | |
255 if (extra_base == NULL) return; | |
256 // Do manual alignment | |
257 base = (char*) align_size_up((uintptr_t) extra_base, alignment); | |
258 assert(base >= extra_base, "just checking"); | |
259 // Release unused areas | |
260 size_t unused_bottom_size = base - extra_base; | |
261 size_t unused_top_size = extra_size - size - unused_bottom_size; | |
262 assert(unused_bottom_size % os::vm_allocation_granularity() == 0, | |
263 "size not allocation aligned"); | |
264 assert(unused_top_size % os::vm_allocation_granularity() == 0, | |
265 "size not allocation aligned"); | |
266 if (unused_bottom_size > 0) { | |
267 os::release_memory(extra_base, unused_bottom_size); | |
268 } | |
269 if (unused_top_size > 0) { | |
270 os::release_memory(base + size, unused_top_size); | |
271 } | |
272 } | |
273 } | |
274 // Done | |
275 _base = base; | |
276 _size = size; | |
277 _alignment = MAX2(alignment, (size_t) os::vm_page_size()); | |
237 | 278 _noaccess_prefix = noaccess_prefix; |
279 |
280 // Assert that if noaccess_prefix is used, it is the same as alignment. |
281 assert(noaccess_prefix == 0 || |
282 noaccess_prefix == _alignment, "noaccess prefix wrong"); |
0 | 283 |
284 assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base, | |
285 "area must be distinguishable from marks for mark-sweep"); | |
286 assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size], | |
287 "area must be distinguishable from marks for mark-sweep"); | |
288 } | |
289 | |
290 | |
291 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, | |
292 bool special) { | |
293 assert((size % os::vm_allocation_granularity()) == 0, | |
294 "size not allocation aligned"); | |
295 _base = base; | |
296 _size = size; | |
297 _alignment = alignment; | |
237 | 298 _noaccess_prefix = 0; |
0 | 299 _special = special; |
300 } | |
301 | |
302 | |
303 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment, | |
304 bool split, bool realloc) { | |
305 assert(partition_size <= size(), "partition failed"); | |
306 if (split) { | |
307 os::split_reserved_memory(_base, _size, partition_size, realloc); | |
308 } | |
309 ReservedSpace result(base(), partition_size, alignment, special()); | |
310 return result; | |
311 } | |
312 | |
313 | |
314 ReservedSpace | |
315 ReservedSpace::last_part(size_t partition_size, size_t alignment) { | |
316 assert(partition_size <= size(), "partition failed"); | |
317 ReservedSpace result(base() + partition_size, size() - partition_size, | |
318 alignment, special()); | |
319 return result; | |
320 } | |
321 | |
322 | |
323 size_t ReservedSpace::page_align_size_up(size_t size) { | |
324 return align_size_up(size, os::vm_page_size()); | |
325 } | |
326 | |
327 | |
328 size_t ReservedSpace::page_align_size_down(size_t size) { | |
329 return align_size_down(size, os::vm_page_size()); | |
330 } | |
331 | |
332 | |
333 size_t ReservedSpace::allocation_align_size_up(size_t size) { | |
334 return align_size_up(size, os::vm_allocation_granularity()); | |
335 } | |
336 | |
337 | |
338 size_t ReservedSpace::allocation_align_size_down(size_t size) { | |
339 return align_size_down(size, os::vm_allocation_granularity()); | |
340 } | |
341 | |
342 | |
343 void ReservedSpace::release() { | |
344 if (is_reserved()) { | |
237 | 345 char *real_base = _base - _noaccess_prefix; |
346 const size_t real_size = _size + _noaccess_prefix; |
0 | 347 if (special()) { |
237 | 348 os::release_memory_special(real_base, real_size); |
0 | 349 } else{ |
237 | 350 os::release_memory(real_base, real_size); |
0 | 351 } |
352 _base = NULL; | |
353 _size = 0; | |
237 | 354 _noaccess_prefix = 0; |
0 | 355 _special = false; |
356 } | |
357 } | |
358 | |
237 | 359 void ReservedSpace::protect_noaccess_prefix(const size_t size) { |
360 // If there is no noaccess prefix, return. |
361 if (_noaccess_prefix == 0) return; |
362 |
363 assert(_noaccess_prefix >= (size_t)os::vm_page_size(), |
364 "must be at least page size big"); |
365 |
366 // Protect memory at the base of the allocated region. |
367 // If special, the page was committed (only matters on windows) |
368 if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, |
369 _special)) { |
370 fatal("cannot protect protection page"); |
371 } |
372 |
373 _base += _noaccess_prefix; |
374 _size -= _noaccess_prefix; |
375 assert((size == _size) && ((uintptr_t)_base % _alignment == 0), |
376 "must be exactly of required size and alignment"); |
377 } |
378 |
379 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, |
380 bool large, char* requested_address) : |
381 ReservedSpace(size, alignment, large, |
382 requested_address, |
325 | 383 UseCompressedOops && UseImplicitNullCheckForNarrowOop ? |
384 lcm(os::vm_page_size(), alignment) : 0) { |
237 | 385 // Only reserved space for the java heap should have a noaccess_prefix |
386 // if using compressed oops. |
387 protect_noaccess_prefix(size); |
388 } |
389 |
390 ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size, |
391 const size_t prefix_align, |
392 const size_t suffix_size, |
393 const size_t suffix_align) : |
394 ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align, |
325 | 395 UseCompressedOops && UseImplicitNullCheckForNarrowOop ? |
396 lcm(os::vm_page_size(), prefix_align) : 0) { |
237 | 397 protect_noaccess_prefix(prefix_size+suffix_size); |
398 } |
0 | 399 |
400 // VirtualSpace | |
401 | |
402 VirtualSpace::VirtualSpace() { | |
403 _low_boundary = NULL; | |
404 _high_boundary = NULL; | |
405 _low = NULL; | |
406 _high = NULL; | |
407 _lower_high = NULL; | |
408 _middle_high = NULL; | |
409 _upper_high = NULL; | |
410 _lower_high_boundary = NULL; | |
411 _middle_high_boundary = NULL; | |
412 _upper_high_boundary = NULL; | |
413 _lower_alignment = 0; | |
414 _middle_alignment = 0; | |
415 _upper_alignment = 0; | |
237 | 416 _special = false; |
0 | 417 } |
418 | |
419 | |
420 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) { | |
421 if(!rs.is_reserved()) return false; // allocation failed. | |
422 assert(_low_boundary == NULL, "VirtualSpace already initialized"); | |
423 _low_boundary = rs.base(); | |
424 _high_boundary = low_boundary() + rs.size(); | |
425 | |
426 _low = low_boundary(); | |
427 _high = low(); | |
428 | |
429 _special = rs.special(); | |
430 | |
431 // When a VirtualSpace begins life at a large size, make all future expansion | |
432 // and shrinking occur aligned to a granularity of large pages. This avoids | |
433 // fragmentation of physical addresses that inhibits the use of large pages | |
434 // by the OS virtual memory system. Empirically, we see that with a 4MB | |
435 // page size, the only spaces that get handled this way are codecache and | |
436 // the heap itself, both of which provide a substantial performance | |
437 // boost in many benchmarks when covered by large pages. | |
438 // | |
439 // No attempt is made to force large page alignment at the very top and | |
440 // bottom of the space if they are not aligned so already. | |
441 _lower_alignment = os::vm_page_size(); | |
442 _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1); | |
443 _upper_alignment = os::vm_page_size(); | |
444 | |
445 // End of each region | |
446 _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment()); | |
447 _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment()); | |
448 _upper_high_boundary = high_boundary(); | |
449 | |
450 // High address of each region | |
451 _lower_high = low_boundary(); | |
452 _middle_high = lower_high_boundary(); | |
453 _upper_high = middle_high_boundary(); | |
454 | |
455 // commit to initial size | |
456 if (committed_size > 0) { | |
457 if (!expand_by(committed_size)) { | |
458 return false; | |
459 } | |
460 } | |
461 return true; | |
462 } | |
463 | |
464 | |
465 VirtualSpace::~VirtualSpace() { | |
466 release(); | |
467 } | |
468 | |
469 | |
470 void VirtualSpace::release() { | |
237 | 471 // This does not release memory it never reserved. |
472 // Caller must release via rs.release(); |
0 | 473 _low_boundary = NULL; |
474 _high_boundary = NULL; | |
475 _low = NULL; | |
476 _high = NULL; | |
477 _lower_high = NULL; | |
478 _middle_high = NULL; | |
479 _upper_high = NULL; | |
480 _lower_high_boundary = NULL; | |
481 _middle_high_boundary = NULL; | |
482 _upper_high_boundary = NULL; | |
483 _lower_alignment = 0; | |
484 _middle_alignment = 0; | |
485 _upper_alignment = 0; | |
486 _special = false; | |
487 } | |
488 | |
489 | |
490 size_t VirtualSpace::committed_size() const { | |
491 return pointer_delta(high(), low(), sizeof(char)); | |
492 } | |
493 | |
494 | |
495 size_t VirtualSpace::reserved_size() const { | |
496 return pointer_delta(high_boundary(), low_boundary(), sizeof(char)); | |
497 } | |
498 | |
499 | |
500 size_t VirtualSpace::uncommitted_size() const { | |
501 return reserved_size() - committed_size(); | |
502 } | |
503 | |
504 | |
505 bool VirtualSpace::contains(const void* p) const { | |
506 return low() <= (const char*) p && (const char*) p < high(); | |
507 } | |
508 | |
509 /* | |
510 First we need to determine if a particular virtual space is using large | |
511 pages. This is done at the initialize function and only virtual spaces | |
512 that are larger than LargePageSizeInBytes use large pages. Once we | |
513 have determined this, all expand_by and shrink_by calls must grow and | |
514 shrink by large page size chunks. If a particular request | |
515 is within the current large page, the call to commit and uncommit memory | |
516 can be ignored. In the case that the low and high boundaries of this | |
517 space is not large page aligned, the pages leading to the first large | |
518 page address and the pages after the last large page address must be | |
519 allocated with default pages. | |
520 */ | |
521 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) { | |
522 if (uncommitted_size() < bytes) return false; | |
523 | |
524 if (special()) { | |
525 // don't commit memory if the entire space is pinned in memory | |
526 _high += bytes; | |
527 return true; | |
528 } | |
529 | |
530 char* previous_high = high(); | |
531 char* unaligned_new_high = high() + bytes; | |
532 assert(unaligned_new_high <= high_boundary(), | |
533 "cannot expand by more than upper boundary"); | |
534 | |
535 // Calculate where the new high for each of the regions should be. If | |
536 // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned | |
537 // then the unaligned lower and upper new highs would be the | |
538 // lower_high() and upper_high() respectively. | |
539 char* unaligned_lower_new_high = | |
540 MIN2(unaligned_new_high, lower_high_boundary()); | |
541 char* unaligned_middle_new_high = | |
542 MIN2(unaligned_new_high, middle_high_boundary()); | |
543 char* unaligned_upper_new_high = | |
544 MIN2(unaligned_new_high, upper_high_boundary()); | |
545 | |
546 // Align the new highs based on the regions alignment. lower and upper | |
547 // alignment will always be default page size. middle alignment will be | |
548 // LargePageSizeInBytes if the actual size of the virtual space is in | |
549 // fact larger than LargePageSizeInBytes. | |
550 char* aligned_lower_new_high = | |
551 (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment()); | |
552 char* aligned_middle_new_high = | |
553 (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment()); | |
554 char* aligned_upper_new_high = | |
555 (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment()); | |
556 | |
557 // Determine which regions need to grow in this expand_by call. | |
558 // If you are growing in the lower region, high() must be in that | |
559 // region so calculate the size based on high(). For the middle and | |
560 // upper regions, determine the starting point of growth based on the | |
561 // location of high(). By getting the MAX of the region's low address | |
562 // (or the previous region's high address) and high(), we can tell if it | |
563 // is an intra or inter region growth. | |
564 size_t lower_needs = 0; | |
565 if (aligned_lower_new_high > lower_high()) { | |
566 lower_needs = | |
567 pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char)); | |
568 } | |
569 size_t middle_needs = 0; | |
570 if (aligned_middle_new_high > middle_high()) { | |
571 middle_needs = | |
572 pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char)); | |
573 } | |
574 size_t upper_needs = 0; | |
575 if (aligned_upper_new_high > upper_high()) { | |
576 upper_needs = | |
577 pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char)); | |
578 } | |
579 | |
580 // Check contiguity. | |
581 assert(low_boundary() <= lower_high() && | |
582 lower_high() <= lower_high_boundary(), | |
583 "high address must be contained within the region"); | |
584 assert(lower_high_boundary() <= middle_high() && | |
585 middle_high() <= middle_high_boundary(), | |
586 "high address must be contained within the region"); | |
587 assert(middle_high_boundary() <= upper_high() && | |
588 upper_high() <= upper_high_boundary(), | |
589 "high address must be contained within the region"); | |
590 | |
591 // Commit regions | |
592 if (lower_needs > 0) { | |
593 assert(low_boundary() <= lower_high() && | |
594 lower_high() + lower_needs <= lower_high_boundary(), | |
595 "must not expand beyond region"); | |
596 if (!os::commit_memory(lower_high(), lower_needs)) { | |
597 debug_only(warning("os::commit_memory failed")); | |
598 return false; | |
599 } else { | |
600 _lower_high += lower_needs; | |
601 } | |
602 } | |
603 if (middle_needs > 0) { | |
604 assert(lower_high_boundary() <= middle_high() && | |
605 middle_high() + middle_needs <= middle_high_boundary(), | |
606 "must not expand beyond region"); | |
607 if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) { | |
608 debug_only(warning("os::commit_memory failed")); | |
609 return false; | |
610 } | |
611 _middle_high += middle_needs; | |
612 } | |
613 if (upper_needs > 0) { | |
614 assert(middle_high_boundary() <= upper_high() && | |
615 upper_high() + upper_needs <= upper_high_boundary(), | |
616 "must not expand beyond region"); | |
617 if (!os::commit_memory(upper_high(), upper_needs)) { | |
618 debug_only(warning("os::commit_memory failed")); | |
619 return false; | |
620 } else { | |
621 _upper_high += upper_needs; | |
622 } | |
623 } | |
624 | |
625 if (pre_touch || AlwaysPreTouch) { | |
626 int vm_ps = os::vm_page_size(); | |
627 for (char* curr = previous_high; | |
628 curr < unaligned_new_high; | |
629 curr += vm_ps) { | |
630 // Note the use of a write here; originally we tried just a read, but | |
631 // since the value read was unused, the optimizer removed the read. | |
632 // If we ever have a concurrent touchahead thread, we'll want to use | |
633 // a read, to avoid the potential of overwriting data (if a mutator | |
634 // thread beats the touchahead thread to a page). There are various | |
635 // ways of making sure this read is not optimized away: for example, | |
636 // generating the code for a read procedure at runtime. | |
637 *curr = 0; | |
638 } | |
639 } | |
640 | |
641 _high += bytes; | |
642 return true; | |
643 } | |
644 | |
645 // A page is uncommitted if the contents of the entire page is deemed unusable. | |
646 // Continue to decrement the high() pointer until it reaches a page boundary | |
647 // in which case that particular page can now be uncommitted. | |
648 void VirtualSpace::shrink_by(size_t size) { | |
649 if (committed_size() < size) | |
650 fatal("Cannot shrink virtual space to negative size"); | |
651 | |
652 if (special()) { | |
653 // don't uncommit if the entire space is pinned in memory | |
654 _high -= size; | |
655 return; | |
656 } | |
657 | |
658 char* unaligned_new_high = high() - size; | |
659 assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary"); | |
660 | |
661 // Calculate new unaligned address | |
662 char* unaligned_upper_new_high = | |
663 MAX2(unaligned_new_high, middle_high_boundary()); | |
664 char* unaligned_middle_new_high = | |
665 MAX2(unaligned_new_high, lower_high_boundary()); | |
666 char* unaligned_lower_new_high = | |
667 MAX2(unaligned_new_high, low_boundary()); | |
668 | |
669 // Align address to region's alignment | |
670 char* aligned_upper_new_high = | |
671 (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment()); | |
672 char* aligned_middle_new_high = | |
673 (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment()); | |
674 char* aligned_lower_new_high = | |
675 (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment()); | |
676 | |
677 // Determine which regions need to shrink | |
678 size_t upper_needs = 0; | |
679 if (aligned_upper_new_high < upper_high()) { | |
680 upper_needs = | |
681 pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char)); | |
682 } | |
683 size_t middle_needs = 0; | |
684 if (aligned_middle_new_high < middle_high()) { | |
685 middle_needs = | |
686 pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char)); | |
687 } | |
688 size_t lower_needs = 0; | |
689 if (aligned_lower_new_high < lower_high()) { | |
690 lower_needs = | |
691 pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char)); | |
692 } | |
693 | |
694 // Check contiguity. | |
695 assert(middle_high_boundary() <= upper_high() && | |
696 upper_high() <= upper_high_boundary(), | |
697 "high address must be contained within the region"); | |
698 assert(lower_high_boundary() <= middle_high() && | |
699 middle_high() <= middle_high_boundary(), | |
700 "high address must be contained within the region"); | |
701 assert(low_boundary() <= lower_high() && | |
702 lower_high() <= lower_high_boundary(), | |
703 "high address must be contained within the region"); | |
704 | |
705 // Uncommit | |
706 if (upper_needs > 0) { | |
707 assert(middle_high_boundary() <= aligned_upper_new_high && | |
708 aligned_upper_new_high + upper_needs <= upper_high_boundary(), | |
709 "must not shrink beyond region"); | |
710 if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) { | |
711 debug_only(warning("os::uncommit_memory failed")); | |
712 return; | |
713 } else { | |
714 _upper_high -= upper_needs; | |
715 } | |
716 } | |
717 if (middle_needs > 0) { | |
718 assert(lower_high_boundary() <= aligned_middle_new_high && | |
719 aligned_middle_new_high + middle_needs <= middle_high_boundary(), | |
720 "must not shrink beyond region"); | |
721 if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) { | |
722 debug_only(warning("os::uncommit_memory failed")); | |
723 return; | |
724 } else { | |
725 _middle_high -= middle_needs; | |
726 } | |
727 } | |
728 if (lower_needs > 0) { | |
729 assert(low_boundary() <= aligned_lower_new_high && | |
730 aligned_lower_new_high + lower_needs <= lower_high_boundary(), | |
731 "must not shrink beyond region"); | |
732 if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) { | |
733 debug_only(warning("os::uncommit_memory failed")); | |
734 return; | |
735 } else { | |
736 _lower_high -= lower_needs; | |
737 } | |
738 } | |
739 | |
740 _high -= size; | |
741 } | |
742 | |
743 #ifndef PRODUCT | |
744 void VirtualSpace::check_for_contiguity() { | |
745 // Check contiguity. | |
746 assert(low_boundary() <= lower_high() && | |
747 lower_high() <= lower_high_boundary(), | |
748 "high address must be contained within the region"); | |
749 assert(lower_high_boundary() <= middle_high() && | |
750 middle_high() <= middle_high_boundary(), | |
751 "high address must be contained within the region"); | |
752 assert(middle_high_boundary() <= upper_high() && | |
753 upper_high() <= upper_high_boundary(), | |
754 "high address must be contained within the region"); | |
755 assert(low() >= low_boundary(), "low"); | |
756 assert(low_boundary() <= lower_high_boundary(), "lower high boundary"); | |
757 assert(upper_high_boundary() <= high_boundary(), "upper high boundary"); | |
758 assert(high() <= upper_high(), "upper high"); | |
759 } | |
760 | |
761 void VirtualSpace::print() { | |
762 tty->print ("Virtual space:"); | |
763 if (special()) tty->print(" (pinned in memory)"); | |
764 tty->cr(); | |
765 tty->print_cr(" - committed: %ld", committed_size()); | |
766 tty->print_cr(" - reserved: %ld", reserved_size()); | |
767 tty->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high()); | |
768 tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary()); | |
769 } | |
770 | |
771 #endif |