comparison src/share/vm/runtime/virtualspace.cpp @ 237:1fdb98a17101 (base 235:9c2ecc2ffb12)

6716785: implicit null checks not triggering with CompressedOops
Summary: allocate alignment-sized page(s) below java heap so that memory accesses at heap_base+1page give signal and cause an implicit null check
Reviewed-by: kvn, jmasa, phh, jcoomes
author coleenp
date Sat, 19 Jul 2008 17:38:22 -0400
parents a61af66fc99e
children 93befa083681 1ee8caae33af
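The idea behind the change, stated outside HotSpot's own abstractions: reserve one extra alignment-sized, inaccessible region directly below the java heap. A null compressed oop then decodes to heap_base, so a load from heap_base+offset lands in the protected page and raises a signal, which the VM can turn into an implicit null check instead of emitting an explicit test. A minimal POSIX sketch of that trick (illustrative names and flags, not HotSpot's os:: API):

    #include <sys/mman.h>
    #include <cstddef>

    // Illustrative only: reserve heap_size bytes preceded by one
    // inaccessible "noaccess prefix" page. A null compressed oop decodes
    // to the prefix region, so touching a small field offset faults
    // instead of reading whatever happens to live below the heap.
    char* reserve_heap_with_noaccess_prefix(size_t heap_size, size_t page) {
      // Reserve prefix + heap in one mapping so they are contiguous.
      char* base = (char*)mmap(nullptr, page + heap_size,
                               PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) return nullptr;
      // Make the first page inaccessible; any access to
      // [base, base + page) now raises SIGSEGV.
      if (mprotect(base, page, PROT_NONE) != 0) return nullptr;
      return base + page;  // the usable heap starts above the prefix
    }

The hunks below thread a noaccess_prefix parameter through ReservedSpace so the real reservation can be sized, protected, and released consistently.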
--- a/src/share/vm/runtime/virtualspace.cpp
+++ b/src/share/vm/runtime/virtualspace.cpp
@@ -26,16 +26,19 @@
 #include "incls/_virtualspace.cpp.incl"
 
 
 // ReservedSpace
 ReservedSpace::ReservedSpace(size_t size) {
-  initialize(size, 0, false, NULL);
+  initialize(size, 0, false, NULL, 0);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
-                             bool large, char* requested_address) {
-  initialize(size, alignment, large, requested_address);
+                             bool large,
+                             char* requested_address,
+                             const size_t noaccess_prefix) {
+  initialize(size+noaccess_prefix, alignment, large, requested_address,
+             noaccess_prefix);
 }
 
 char *
 ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                      const size_t prefix_size,
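The bookkeeping to note in this hunk: the caller still asks for size usable bytes, and the constructor grows the reservation by noaccess_prefix before handing it to initialize(). With illustrative numbers (assuming page size equals heap alignment, so the prefix is one page):

    // Illustrative sizes only.
    const size_t page            = 4096;                 // os::vm_page_size()
    const size_t heap_size       = 512u * 1024 * 1024;   // requested java heap
    const size_t noaccess_prefix = page;                 // lcm(page, alignment) when they match
    const size_t reserved        = heap_size + noaccess_prefix; // what the OS hands back
    // After protect_noaccess_prefix(), _base is bumped past the prefix and
    // _size shrinks back to heap_size, so clients never see the extra page.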
@@ -103,11 +106,12 @@
 }
 
 ReservedSpace::ReservedSpace(const size_t prefix_size,
                              const size_t prefix_align,
                              const size_t suffix_size,
-                             const size_t suffix_align)
+                             const size_t suffix_align,
+                             const size_t noaccess_prefix)
 {
   assert(prefix_size != 0, "sanity");
   assert(prefix_align != 0, "sanity");
   assert(suffix_size != 0, "sanity");
   assert(suffix_align != 0, "sanity");
@@ -116,32 +120,40 @@
   assert((suffix_size & suffix_align - 1) == 0,
     "suffix_size not divisible by suffix_align");
   assert((suffix_align & prefix_align - 1) == 0,
     "suffix_align not divisible by prefix_align");
 
+  // Add in noaccess_prefix to prefix_size;
+  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
+  const size_t size = adjusted_prefix_size + suffix_size;
+
   // On systems where the entire region has to be reserved and committed up
   // front, the compound alignment normally done by this method is unnecessary.
   const bool try_reserve_special = UseLargePages &&
     prefix_align == os::large_page_size();
   if (!os::can_commit_large_page_memory() && try_reserve_special) {
-    initialize(prefix_size + suffix_size, prefix_align, true);
+    initialize(size, prefix_align, true, NULL, noaccess_prefix);
     return;
   }
 
   _base = NULL;
   _size = 0;
   _alignment = 0;
   _special = false;
+  _noaccess_prefix = 0;
+
+  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == prefix_align, "noaccess prefix wrong");
 
   // Optimistically try to reserve the exact size needed.
-  const size_t size = prefix_size + suffix_size;
   char* addr = os::reserve_memory(size, NULL, prefix_align);
   if (addr == NULL) return;
 
   // Check whether the result has the needed alignment (unlikely unless
   // prefix_align == suffix_align).
-  const size_t ofs = size_t(addr) + prefix_size & suffix_align - 1;
+  const size_t ofs = size_t(addr) + adjusted_prefix_size & suffix_align - 1;
   if (ofs != 0) {
     // Wrong alignment.  Release, allocate more space and do manual alignment.
     //
     // On most operating systems, another allocation with a somewhat larger size
     // will return an address "close to" that of the previous allocation.  The
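A reading aid for the ofs computation above: C++ precedence makes size_t(addr) + adjusted_prefix_size & suffix_align - 1 parse as (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1), i.e. the distance of the prefix/suffix boundary from the nearest suffix_align boundary. A standalone restatement (hypothetical helper, assuming suffix_align is a power of two as the masking implies):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // What the ofs computation checks, with the implicit parentheses
    // spelled out (& binds more loosely than + and -).
    size_t suffix_misalignment(uintptr_t addr, size_t adjusted_prefix_size,
                               size_t suffix_align) {
      assert((suffix_align & (suffix_align - 1)) == 0, "power of two");
      // Distance of the prefix/suffix boundary from the next suffix_align
      // boundary; zero means the optimistic reservation can be used as-is.
      return (addr + adjusted_prefix_size) & (suffix_align - 1);
    }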
@@ -151,26 +163,28 @@
     if (!os::release_memory(addr, size)) {
       fatal("os::release_memory failed");
     }
 
     const size_t extra = MAX2(ofs, suffix_align - ofs);
-    addr = reserve_and_align(size + extra, prefix_size, prefix_align,
+    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                              suffix_size, suffix_align);
     if (addr == NULL) {
       // Try an even larger region.  If this fails, address space is exhausted.
-      addr = reserve_and_align(size + suffix_align, prefix_size,
+      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                                prefix_align, suffix_size, suffix_align);
     }
   }
 
   _base = addr;
   _size = size;
   _alignment = prefix_align;
+  _noaccess_prefix = noaccess_prefix;
 }
 
 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
-                               char* requested_address) {
+                               char* requested_address,
+                               const size_t noaccess_prefix) {
   const size_t granularity = os::vm_allocation_granularity();
   assert((size & granularity - 1) == 0,
          "size not aligned to os::vm_allocation_granularity()");
   assert((alignment & granularity - 1) == 0,
          "alignment not aligned to os::vm_allocation_granularity()");
@@ -179,10 +193,11 @@
 
   _base = NULL;
   _size = 0;
   _special = false;
   _alignment = 0;
+  _noaccess_prefix = 0;
   if (size == 0) {
     return;
   }
 
   // If OS doesn't support demand paging for large page memory, we need
@@ -218,11 +233,12 @@
   // If the memory was requested at a particular address, use
   // os::attempt_reserve_memory_at() to avoid over mapping something
   // important.  If available space is not detected, return NULL.
 
   if (requested_address != 0) {
-    base = os::attempt_reserve_memory_at(size, requested_address);
+    base = os::attempt_reserve_memory_at(size,
+                                         requested_address-noaccess_prefix);
   } else {
     base = os::reserve_memory(size, NULL, alignment);
   }
 
   if (base == NULL) return;
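The subtraction in this hunk keeps the usable heap at the address the caller asked for: protect_noaccess_prefix() later advances _base past the prefix, so the raw reservation must start one prefix below requested_address. With illustrative values:

    // Illustrative: requested_address is where the java heap should start.
    char*  requested_address = (char*)0x0000000100000000; // hypothetical
    size_t noaccess_prefix   = 4096;
    // The raw reservation begins one prefix below the request...
    char*  raw_base  = requested_address - noaccess_prefix;
    // ...so after the prefix is protected and _base is advanced,
    // the heap base is back at the requested address.
    char*  heap_base = raw_base + noaccess_prefix; // == requested_address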
@@ -257,10 +273,15 @@
   }
   // Done
   _base = base;
   _size = size;
   _alignment = MAX2(alignment, (size_t) os::vm_page_size());
+  _noaccess_prefix = noaccess_prefix;
+
+  // Assert that if noaccess_prefix is used, it is the same as alignment.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == _alignment, "noaccess prefix wrong");
 
   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
          "area must be distinguisable from marks for mark-sweep");
   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
          "area must be distinguisable from marks for mark-sweep");
@@ -272,10 +293,11 @@
   assert((size % os::vm_allocation_granularity()) == 0,
          "size not allocation aligned");
   _base = base;
   _size = size;
   _alignment = alignment;
+  _noaccess_prefix = 0;
   _special = special;
 }
 
 
 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
@@ -318,21 +340,62 @@
 }
 
 
 void ReservedSpace::release() {
   if (is_reserved()) {
+    char *real_base = _base - _noaccess_prefix;
+    const size_t real_size = _size + _noaccess_prefix;
     if (special()) {
-      os::release_memory_special(_base, _size);
+      os::release_memory_special(real_base, real_size);
     } else{
-      os::release_memory(_base, _size);
+      os::release_memory(real_base, real_size);
     }
     _base = NULL;
     _size = 0;
+    _noaccess_prefix = 0;
     _special = false;
   }
 }
 
+void ReservedSpace::protect_noaccess_prefix(const size_t size) {
+  // If there is no noaccess prefix, return.
+  if (_noaccess_prefix == 0) return;
+
+  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
+         "must be at least page size big");
+
+  // Protect memory at the base of the allocated region.
+  // If special, the page was committed (only matters on windows)
+  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
+                          _special)) {
+    fatal("cannot protect protection page");
+  }
+
+  _base += _noaccess_prefix;
+  _size -= _noaccess_prefix;
+  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
+         "must be exactly of required size and alignment");
+}
+
+ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
+                                     bool large, char* requested_address) :
+  ReservedSpace(size, alignment, large,
+                requested_address,
+                UseCompressedOops ? lcm(os::vm_page_size(), alignment) : 0) {
+  // Only reserved space for the java heap should have a noaccess_prefix
+  // if using compressed oops.
+  protect_noaccess_prefix(size);
+}
+
+ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
+                                     const size_t prefix_align,
+                                     const size_t suffix_size,
+                                     const size_t suffix_align) :
+  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
+                UseCompressedOops ? lcm(os::vm_page_size(), prefix_align) : 0) {
+  protect_noaccess_prefix(prefix_size+suffix_size);
+}
 
 // VirtualSpace
 
 VirtualSpace::VirtualSpace() {
   _low_boundary = NULL;
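Two invariants tie the hunk above together. After protect_noaccess_prefix(), _base and _size describe only the usable heap, so release() must rebuild the raw mapping as _base - _noaccess_prefix and _size + _noaccess_prefix. And the ReservedHeapSpace constructors size the prefix as lcm(page size, alignment), so it is both page-granular (protectable) and alignment-granular (advancing _base by it preserves heap alignment). A small check of the lcm choice (stand-in gcd/lcm and illustrative sizes; HotSpot supplies its own lcm()):

    #include <cassert>
    #include <cstddef>

    // Stand-in gcd/lcm for illustration of the constructor's choice.
    size_t gcd(size_t a, size_t b) { while (b != 0) { size_t t = a % b; a = b; b = t; } return a; }
    size_t lcm(size_t a, size_t b) { return a / gcd(a, b) * b; }

    int main() {
      const size_t page = 4096, align = 65536;  // hypothetical heap alignment
      const size_t prefix = lcm(page, align);   // 65536 here
      assert(prefix % page == 0);   // protectable with page-granular protection
      assert(prefix % align == 0);  // shifting _base by it preserves alignment
    }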
@@ -346,10 +409,11 @@
   _middle_high_boundary = NULL;
   _upper_high_boundary = NULL;
   _lower_alignment = 0;
   _middle_alignment = 0;
   _upper_alignment = 0;
+  _special = false;
 }
 
 
 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
   if(!rs.is_reserved()) return false; // allocation failed.
@@ -400,11 +464,12 @@
   release();
 }
 
 
 void VirtualSpace::release() {
-  (void)os::release_memory(low_boundary(), reserved_size());
+  // This does not release memory it never reserved.
+  // Caller must release via rs.release();
   _low_boundary = NULL;
   _high_boundary = NULL;
   _low = NULL;
   _high = NULL;
   _lower_high = NULL;
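Finally, VirtualSpace::release() now only forgets its boundaries; the mapping itself stays owned by the ReservedSpace, which is the only object that knows a noaccess prefix must be added back before unmapping. The calling convention the new comment implies, as a hedged sketch (not a verbatim HotSpot call site):

    // Illustrative teardown order after this change:
    //   ReservedHeapSpace rs(heap_size, alignment, large, NULL); // prefix + heap
    //   VirtualSpace vs;
    //   vs.initialize(rs, committed_size); // commits within rs, reserves nothing
    //   ...
    //   vs.release();  // clears boundaries only; unmaps nothing
    //   rs.release();  // actually unmaps, noaccess prefix included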