Mercurial annotate: src/share/vm/runtime/virtualspace.cpp @ 1966:4110c3e0c50d (graal-jvmci-8)

changeset: Merge
author:    iveresov
date:      Fri, 19 Nov 2010 17:01:34 -0800
parents:   5f249b390094
children:  f95d63e2154a
/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_virtualspace.cpp.incl"

// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

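// Illustrative usage (a sketch, not part of this file): callers typically
// reserve a range and then commit part of it through a VirtualSpace:
//
//   ReservedSpace rs(1024 * 1024);          // reserve 1M of address space
//   VirtualSpace vs;
//   if (rs.is_reserved() && vs.initialize(rs, 64 * 1024)) {
//     // [vs.low(), vs.high()) is now a committed 64K window
//   }
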
char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
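  // Note: '&' binds more weakly than '+' and '-' in C++, so the next
  // expression parses as (s + prefix_size) & (suffix_align - 1): the offset
  // of the would-be suffix start within a suffix_align-sized chunk.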
  const size_t beg_ofs = s + prefix_size & suffix_align - 1;
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

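// Worked example for align_reserved_region (illustrative values): with
// prefix_size = 0x1000, suffix_align = 0x10000 and addr = 0x10001000,
// beg_ofs = (0x10001000 + 0x1000) & 0xFFFF = 0x2000, so beg_delta =
// 0x10000 - 0x2000 = 0xE000 and the returned base is 0x1000F000; the
// suffix then starts at 0x10010000, which is suffix_align-aligned.
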
char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & prefix_align - 1) == 0, "bad alignment of prefix");
    assert((res + prefix_size & suffix_align - 1) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper method.
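// Returns true iff a specific address was requested and the reservation
// did not land there; any reservation that landed elsewhere has already
// been released when this returns true.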
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory at not requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored the requested address. Try a different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & prefix_align - 1) == 0,
         "prefix_size not divisible by prefix_align");
  assert((suffix_size & suffix_align - 1) == 0,
         "suffix_size not divisible by suffix_align");
  assert((suffix_align & prefix_align - 1) == 0,
         "suffix_align not divisible by prefix_align");

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  // Add in noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored the requested address. Try a different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, prefix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align == suffix_align).
  const size_t ofs = size_t(addr) + adjusted_prefix_size & suffix_align - 1;
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}

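// Illustrative layout (a sketch of the invariant the constructor above
// establishes):
//
//   _base                         _base + adjusted_prefix_size
//     |---------- prefix ---------|---------- suffix ----------|
//     ^ prefix_align-aligned      ^ suffix_align-aligned
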
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & granularity - 1) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & granularity - 1) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored the requested address. Try a different address.
        return;
      }
      // Check alignment constraints.
      if (alignment > 0) {
        assert((uintptr_t) base % alignment == 0,
               "Large pages returned a non-aligned address");
      }
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored the requested address. Try a different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if (alignment > 0 && ((size_t)base & alignment - 1) != 0) {
      // Base not aligned, retry.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve a size large enough to do manual alignment:
      // increase size to a multiple of the desired alignment.
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
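      // Note: another thread may reserve the freed range between the
      // release_memory() call and the re-reserve below, making the
      // re-reserve fail; the loop retries until it succeeds.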
      do {
        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
        if (extra_base == NULL) return;
        // Do manual alignment.
        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
        assert(base >= extra_base, "just checking");
        // Re-reserve the region at the aligned base address.
        os::release_memory(extra_base, extra_size);
        base = os::reserve_memory(size, base);
      } while (base == NULL);
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (size_t(_base + _size) > OopEncodingHeapMax) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

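// Note (an interpretation, consistent with the asserts in
// protect_noaccess_prefix above): once the first _noaccess_prefix bytes are
// protected, decoding a null narrow oop produces an address inside that
// inaccessible page, so the resulting hardware trap can stand in for an
// explicit null check.
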
ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  // Only reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align,
                                     char* requested_address) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  protect_noaccess_prefix(prefix_size+suffix_size);
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
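  // Illustrative example (an assumption about typical values): with a 2M
  // large page and an 8M reservation, _middle_alignment below becomes the
  // large page size while the two edge regions keep the default page size.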
  _lower_alignment = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}

void VirtualSpace::release() {
  // This does not release the underlying memory: the VirtualSpace never
  // reserved it.  The caller must release it via rs.release().
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
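// Illustrative region layout (a sketch): commits below lower_high_boundary()
// and above middle_high_boundary() use default pages; the middle region can
// use large pages.
//
//   low_boundary()   lower_high_boundary()   middle_high_boundary()   high_boundary()
//        |----- lower -----|------- middle -------|----- upper -----|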
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

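// Illustrative usage (a sketch): grow the committed window on demand.
//
//   if (vs.uncommitted_size() >= needed && vs.expand_by(needed, false)) {
//     // [old high(), old high() + needed) is now committed
//   }
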
// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that particular page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: %ld", committed_size());
  tty->print_cr(" - reserved:  %ld", reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}

#endif