comparison src/share/vm/runtime/virtualspace.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
children 1fdb98a17101 37f87013dfd8
1 /*
2 * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 #include "incls/_precompiled.incl"
26 #include "incls/_virtualspace.cpp.incl"
27
28
29 // ReservedSpace
30 ReservedSpace::ReservedSpace(size_t size) {
31 initialize(size, 0, false, NULL);
32 }
33
34 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
35 bool large, char* requested_address) {
36 initialize(size, alignment, large, requested_address);
37 }
38
39 char *
40 ReservedSpace::align_reserved_region(char* addr, const size_t len,
41 const size_t prefix_size,
42 const size_t prefix_align,
43 const size_t suffix_size,
44 const size_t suffix_align)
45 {
46 assert(addr != NULL, "sanity");
47 const size_t required_size = prefix_size + suffix_size;
48 assert(len >= required_size, "len too small");
49
50 const size_t s = size_t(addr);
51 const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
52 const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
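// Worked example (hypothetical values): with addr = 0x10200000,
// prefix_size = 1M and suffix_align = 4M,
//   beg_ofs   = (0x10200000 + 0x100000) & 0x3fffff = 0x300000
//   beg_delta = 4M - 0x300000 = 1M
// so the region is trimmed to start at 0x10300000, which puts the end of
// the prefix (0x10300000 + 1M = 0x10400000) on a 4M boundary.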
53
54 if (len < beg_delta + required_size) {
55 return NULL; // Cannot do proper alignment.
56 }
57 const size_t end_delta = len - (beg_delta + required_size);
58
59 if (beg_delta != 0) {
60 os::release_memory(addr, beg_delta);
61 }
62
63 if (end_delta != 0) {
64 char* release_addr = (char*) (s + beg_delta + required_size);
65 os::release_memory(release_addr, end_delta);
66 }
67
68 return (char*) (s + beg_delta);
69 }
70
71 char* ReservedSpace::reserve_and_align(const size_t reserve_size,
72 const size_t prefix_size,
73 const size_t prefix_align,
74 const size_t suffix_size,
75 const size_t suffix_align)
76 {
77 assert(reserve_size > prefix_size + suffix_size, "should not be here");
78
79 char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
80 if (raw_addr == NULL) return NULL;
81
82 char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
83 prefix_align, suffix_size,
84 suffix_align);
85 if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
86 fatal("os::release_memory failed");
87 }
88
89 #ifdef ASSERT
90 if (result != NULL) {
91 const size_t raw = size_t(raw_addr);
92 const size_t res = size_t(result);
93 assert(res >= raw, "alignment decreased start addr");
94 assert(res + prefix_size + suffix_size <= raw + reserve_size,
95 "alignment increased end addr");
96 assert((res & prefix_align - 1) == 0, "bad alignment of prefix");
97 assert((res + prefix_size & suffix_align - 1) == 0,
98 "bad alignment of suffix");
99 }
100 #endif
101
102 return result;
103 }
104
105 ReservedSpace::ReservedSpace(const size_t prefix_size,
106 const size_t prefix_align,
107 const size_t suffix_size,
108 const size_t suffix_align)
109 {
110 assert(prefix_size != 0, "sanity");
111 assert(prefix_align != 0, "sanity");
112 assert(suffix_size != 0, "sanity");
113 assert(suffix_align != 0, "sanity");
114 assert((prefix_size & prefix_align - 1) == 0,
115 "prefix_size not divisible by prefix_align");
116 assert((suffix_size & suffix_align - 1) == 0,
117 "suffix_size not divisible by suffix_align");
118 assert((suffix_align & prefix_align - 1) == 0,
119 "suffix_align not divisible by prefix_align");
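// The layout this constructor tries to establish (a sketch derived from the
// asserts in reserve_and_align() above):
//
//   _base                       _base + prefix_size
//   |--- prefix (prefix_align) ---|-------- suffix (suffix_align) --------|
//
// i.e. _base is prefix_align-aligned and _base + prefix_size is
// suffix_align-aligned.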
120
121 // On systems where the entire region has to be reserved and committed up
122 // front, the compound alignment normally done by this method is unnecessary.
123 const bool try_reserve_special = UseLargePages &&
124 prefix_align == os::large_page_size();
125 if (!os::can_commit_large_page_memory() && try_reserve_special) {
126 initialize(prefix_size + suffix_size, prefix_align, true);
127 return;
128 }
129
130 _base = NULL;
131 _size = 0;
132 _alignment = 0;
133 _special = false;
134
135 // Optimistically try to reserve the exact size needed.
136 const size_t size = prefix_size + suffix_size;
137 char* addr = os::reserve_memory(size, NULL, prefix_align);
138 if (addr == NULL) return;
139
140 // Check whether the result has the needed alignment (unlikely unless
141 // prefix_align == suffix_align).
142 const size_t ofs = (size_t(addr) + prefix_size) & (suffix_align - 1);
143 if (ofs != 0) {
144 // Wrong alignment. Release, allocate more space and do manual alignment.
145 //
146 // On most operating systems, another allocation with a somewhat larger size
147 // will return an address "close to" that of the previous allocation. The
148 // result is often the same address (if the kernel hands out virtual
149 // addresses from low to high), or an address that is offset by the increase
150 // in size. Exploit that to minimize the amount of extra space requested.
151 if (!os::release_memory(addr, size)) {
152 fatal("os::release_memory failed");
153 }
154
155 const size_t extra = MAX2(ofs, suffix_align - ofs);
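// E.g. (hypothetical): with suffix_align = 4M and ofs = 1M this asks for
// MAX2(1M, 3M) = 3M of slack, which is enough for align_reserved_region()
// to trim the front if the new mapping comes back at (or near) the old
// address.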
156 addr = reserve_and_align(size + extra, prefix_size, prefix_align,
157 suffix_size, suffix_align);
158 if (addr == NULL) {
159 // Try an even larger region. If this fails, address space is exhausted.
160 addr = reserve_and_align(size + suffix_align, prefix_size,
161 prefix_align, suffix_size, suffix_align);
162 }
163 }
164
165 _base = addr;
166 _size = size;
167 _alignment = prefix_align;
168 }
169
170 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
171 char* requested_address) {
172 const size_t granularity = os::vm_allocation_granularity();
173 assert((size & granularity - 1) == 0,
174 "size not aligned to os::vm_allocation_granularity()");
175 assert((alignment & granularity - 1) == 0,
176 "alignment not aligned to os::vm_allocation_granularity()");
177 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
178 "not a power of 2");
179
180 _base = NULL;
181 _size = 0;
182 _special = false;
183 _alignment = 0;
184 if (size == 0) {
185 return;
186 }
187
188 // If the OS doesn't support demand paging for large page memory, we need
189 // to use reserve_memory_special() to reserve and pin the entire region.
190 bool special = large && !os::can_commit_large_page_memory();
191 char* base = NULL;
192
193 if (special) {
194 // It's not hard to implement reserve_memory_special() such that it can
195 // allocate at a fixed address, but there seems to be no use for this feature
196 // for now, so it's not implemented.
197 assert(requested_address == NULL, "not implemented");
198
199 base = os::reserve_memory_special(size);
200
201 if (base != NULL) {
202 // Check alignment constraints
203 if (alignment > 0) {
204 assert((uintptr_t) base % alignment == 0,
205 "Large pages returned a non-aligned address");
206 }
207 _special = true;
208 } else {
209 // failed; try to reserve regular memory below
210 }
211 }
212
213 if (base == NULL) {
214 // Optimistically assume that the OS returns an aligned base pointer.
215 // When reserving a large address range, most OSes seem to align to at
216 // least 64K.
217
218 // If the memory was requested at a particular address, use
219 // os::attempt_reserve_memory_at() to avoid mapping over something
220 // important. If available space is not detected, return NULL.
221
222 if (requested_address != 0) {
223 base = os::attempt_reserve_memory_at(size, requested_address);
224 } else {
225 base = os::reserve_memory(size, NULL, alignment);
226 }
227
228 if (base == NULL) return;
229
230 // Check alignment constraints
231 if (alignment > 0 && ((size_t)base & (alignment - 1)) != 0) {
232 // Base not aligned, retry
233 if (!os::release_memory(base, size)) fatal("os::release_memory failed");
234 // Increase size to a multiple of the desired alignment, then reserve
235 // enough extra space to allow manual alignment of the base.
236 size = align_size_up(size, alignment);
237 size_t extra_size = size + alignment;
238 char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
239 if (extra_base == NULL) return;
240 // Do manual alignment
241 base = (char*) align_size_up((uintptr_t) extra_base, alignment);
242 assert(base >= extra_base, "just checking");
243 // Release unused areas
244 size_t unused_bottom_size = base - extra_base;
245 size_t unused_top_size = extra_size - size - unused_bottom_size;
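// Worked example (hypothetical addresses; assumes a 4K allocation
// granularity): alignment = 64K and size = 1M give extra_size = 1M + 64K.
// If extra_base comes back as 0x20004000, then
//   base               = 0x20010000   (next 64K boundary)
//   unused_bottom_size = 0xc000       (released below base)
//   unused_top_size    = 0x4000       (released above base + size)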
246 assert(unused_bottom_size % os::vm_allocation_granularity() == 0,
247 "size not allocation aligned");
248 assert(unused_top_size % os::vm_allocation_granularity() == 0,
249 "size not allocation aligned");
250 if (unused_bottom_size > 0) {
251 os::release_memory(extra_base, unused_bottom_size);
252 }
253 if (unused_top_size > 0) {
254 os::release_memory(base + size, unused_top_size);
255 }
256 }
257 }
258 // Done
259 _base = base;
260 _size = size;
261 _alignment = MAX2(alignment, (size_t) os::vm_page_size());
262
263 assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
264 "area must be distinguishable from marks for mark-sweep");
265 assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
266 "area must be distinguishable from marks for mark-sweep");
267 }
268
269
270 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
271 bool special) {
272 assert((size % os::vm_allocation_granularity()) == 0,
273 "size not allocation aligned");
274 _base = base;
275 _size = size;
276 _alignment = alignment;
277 _special = special;
278 }
279
280
281 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
282 bool split, bool realloc) {
283 assert(partition_size <= size(), "partition failed");
284 if (split) {
285 os::split_reserved_memory(_base, _size, partition_size, realloc);
286 }
287 ReservedSpace result(base(), partition_size, alignment, special());
288 return result;
289 }
290
291
292 ReservedSpace
293 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
294 assert(partition_size <= size(), "partition failed");
295 ReservedSpace result(base() + partition_size, size() - partition_size,
296 alignment, special());
297 return result;
298 }
299
300
301 size_t ReservedSpace::page_align_size_up(size_t size) {
302 return align_size_up(size, os::vm_page_size());
303 }
304
305
306 size_t ReservedSpace::page_align_size_down(size_t size) {
307 return align_size_down(size, os::vm_page_size());
308 }
309
310
311 size_t ReservedSpace::allocation_align_size_up(size_t size) {
312 return align_size_up(size, os::vm_allocation_granularity());
313 }
314
315
316 size_t ReservedSpace::allocation_align_size_down(size_t size) {
317 return align_size_down(size, os::vm_allocation_granularity());
318 }
319
320
321 void ReservedSpace::release() {
322 if (is_reserved()) {
323 if (special()) {
324 os::release_memory_special(_base, _size);
325 } else {
326 os::release_memory(_base, _size);
327 }
328 _base = NULL;
329 _size = 0;
330 _special = false;
331 }
332 }
333
334
335 // VirtualSpace
336
337 VirtualSpace::VirtualSpace() {
338 _low_boundary = NULL;
339 _high_boundary = NULL;
340 _low = NULL;
341 _high = NULL;
342 _lower_high = NULL;
343 _middle_high = NULL;
344 _upper_high = NULL;
345 _lower_high_boundary = NULL;
346 _middle_high_boundary = NULL;
347 _upper_high_boundary = NULL;
348 _lower_alignment = 0;
349 _middle_alignment = 0;
350 _upper_alignment = 0;
351 }
352
353
354 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
355 if (!rs.is_reserved()) return false; // allocation failed.
356 assert(_low_boundary == NULL, "VirtualSpace already initialized");
357 _low_boundary = rs.base();
358 _high_boundary = low_boundary() + rs.size();
359
360 _low = low_boundary();
361 _high = low();
362
363 _special = rs.special();
364
365 // When a VirtualSpace begins life at a large size, make all future expansion
366 // and shrinking occur aligned to a granularity of large pages. This avoids
367 // fragmentation of physical addresses that inhibits the use of large pages
368 // by the OS virtual memory system. Empirically, we see that with a 4MB
369 // page size, the only spaces that get handled this way are codecache and
370 // the heap itself, both of which provide a substantial performance
371 // boost in many benchmarks when covered by large pages.
372 //
373 // No attempt is made to force large page alignment at the very top and
374 // bottom of the space if they are not aligned so already.
375 _lower_alignment = os::vm_page_size();
376 _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
377 _upper_alignment = os::vm_page_size();
378
379 // End of each region
380 _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
381 _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
382 _upper_high_boundary = high_boundary();
383
384 // High address of each region
385 _lower_high = low_boundary();
386 _middle_high = lower_high_boundary();
387 _upper_high = middle_high_boundary();
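// A sketch of the three commit regions set up above:
//
//   low_boundary()                                            high_boundary()
//   +--- lower region ---+-------- middle region --------+--- upper region ---+
//                        ^                               ^
//             lower_high_boundary()             middle_high_boundary()
//        (low_boundary() rounded up to      (high_boundary() rounded down to
//         middle_alignment())                middle_alignment())
//
// The lower and upper regions are committed with default-sized pages; the
// middle region is committed in middle_alignment()-sized (possibly large
// page) chunks.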
388
389 // commit to initial size
390 if (committed_size > 0) {
391 if (!expand_by(committed_size)) {
392 return false;
393 }
394 }
395 return true;
396 }
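// A minimal usage sketch (hypothetical sizes; real call sites derive them
// from VM flags and the needs of the space being created):
//
//   ReservedSpace rs(16*M);                  // reserve 16M of address space
//   VirtualSpace vs;
//   if (!rs.is_reserved() || !vs.initialize(rs, 4*M)) {   // commit first 4M
//     ... handle the allocation failure ...
//   }
//   vs.expand_by(4*M, false);                // commit 4M more on demand
//   vs.shrink_by(2*M);                       // uncommit the top 2M again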
397
398
399 VirtualSpace::~VirtualSpace() {
400 release();
401 }
402
403
404 void VirtualSpace::release() {
405 (void)os::release_memory(low_boundary(), reserved_size());
406 _low_boundary = NULL;
407 _high_boundary = NULL;
408 _low = NULL;
409 _high = NULL;
410 _lower_high = NULL;
411 _middle_high = NULL;
412 _upper_high = NULL;
413 _lower_high_boundary = NULL;
414 _middle_high_boundary = NULL;
415 _upper_high_boundary = NULL;
416 _lower_alignment = 0;
417 _middle_alignment = 0;
418 _upper_alignment = 0;
419 _special = false;
420 }
421
422
423 size_t VirtualSpace::committed_size() const {
424 return pointer_delta(high(), low(), sizeof(char));
425 }
426
427
428 size_t VirtualSpace::reserved_size() const {
429 return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
430 }
431
432
433 size_t VirtualSpace::uncommitted_size() const {
434 return reserved_size() - committed_size();
435 }
436
437
438 bool VirtualSpace::contains(const void* p) const {
439 return low() <= (const char*) p && (const char*) p < high();
440 }
441
442 /*
443 First we need to determine if a particular virtual space is using large
444 pages. This is done in the initialize function, and only virtual spaces
445 that are larger than LargePageSizeInBytes use large pages. Once we
446 have determined this, all expand_by and shrink_by calls must grow and
447 shrink by large page size chunks. If a particular request
448 is within the current large page, the call to commit and uncommit memory
449 can be ignored. In the case that the low and high boundaries of this
450 space are not large page aligned, the pages leading to the first large
451 page address and the pages after the last large page address must be
452 allocated with default pages.
453 */
454 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
455 if (uncommitted_size() < bytes) return false;
456
457 if (special()) {
458 // don't commit memory if the entire space is pinned in memory
459 _high += bytes;
460 return true;
461 }
462
463 char* previous_high = high();
464 char* unaligned_new_high = high() + bytes;
465 assert(unaligned_new_high <= high_boundary(),
466 "cannot expand by more than upper boundary");
467
468 // Calculate where the new high for each of the regions should be. If
469 // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
470 // then the unaligned lower and upper new highs would be the
471 // lower_high() and upper_high() respectively.
472 char* unaligned_lower_new_high =
473 MIN2(unaligned_new_high, lower_high_boundary());
474 char* unaligned_middle_new_high =
475 MIN2(unaligned_new_high, middle_high_boundary());
476 char* unaligned_upper_new_high =
477 MIN2(unaligned_new_high, upper_high_boundary());
478
479 // Align the new highs based on each region's alignment. Lower and upper
480 // alignment will always be the default page size. Middle alignment will be
481 // LargePageSizeInBytes if the actual size of the virtual space is in
482 // fact larger than LargePageSizeInBytes.
483 char* aligned_lower_new_high =
484 (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
485 char* aligned_middle_new_high =
486 (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
487 char* aligned_upper_new_high =
488 (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
489
490 // Determine which regions need to grow in this expand_by call.
491 // If you are growing in the lower region, high() must be in that
492 // region so calculate the size based on high(). For the middle and
493 // upper regions, determine the starting point of growth based on the
494 // location of high(). By getting the MAX of the region's low address
495 // (or the previous region's high address) and high(), we can tell if it
496 // is an intra or inter region growth.
497 size_t lower_needs = 0;
498 if (aligned_lower_new_high > lower_high()) {
499 lower_needs =
500 pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
501 }
502 size_t middle_needs = 0;
503 if (aligned_middle_new_high > middle_high()) {
504 middle_needs =
505 pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
506 }
507 size_t upper_needs = 0;
508 if (aligned_upper_new_high > upper_high()) {
509 upper_needs =
510 pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
511 }
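// E.g. (hypothetical): with a 4K vm page size, middle_alignment() = 4M,
// lower_high_boundary() = low_boundary() + 1M and nothing committed yet
// (high() == low_boundary()), a call to expand_by(2M) yields
//   lower_needs  = 1M   (fills the lower region)
//   middle_needs = 4M   (the remaining 1M rounded up to one large page)
//   upper_needs  = 0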
512
513 // Check contiguity.
514 assert(low_boundary() <= lower_high() &&
515 lower_high() <= lower_high_boundary(),
516 "high address must be contained within the region");
517 assert(lower_high_boundary() <= middle_high() &&
518 middle_high() <= middle_high_boundary(),
519 "high address must be contained within the region");
520 assert(middle_high_boundary() <= upper_high() &&
521 upper_high() <= upper_high_boundary(),
522 "high address must be contained within the region");
523
524 // Commit regions
525 if (lower_needs > 0) {
526 assert(low_boundary() <= lower_high() &&
527 lower_high() + lower_needs <= lower_high_boundary(),
528 "must not expand beyond region");
529 if (!os::commit_memory(lower_high(), lower_needs)) {
530 debug_only(warning("os::commit_memory failed"));
531 return false;
532 } else {
533 _lower_high += lower_needs;
534 }
535 }
536 if (middle_needs > 0) {
537 assert(lower_high_boundary() <= middle_high() &&
538 middle_high() + middle_needs <= middle_high_boundary(),
539 "must not expand beyond region");
540 if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) {
541 debug_only(warning("os::commit_memory failed"));
542 return false;
543 }
544 _middle_high += middle_needs;
545 }
546 if (upper_needs > 0) {
547 assert(middle_high_boundary() <= upper_high() &&
548 upper_high() + upper_needs <= upper_high_boundary(),
549 "must not expand beyond region");
550 if (!os::commit_memory(upper_high(), upper_needs)) {
551 debug_only(warning("os::commit_memory failed"));
552 return false;
553 } else {
554 _upper_high += upper_needs;
555 }
556 }
557
558 if (pre_touch || AlwaysPreTouch) {
559 int vm_ps = os::vm_page_size();
560 for (char* curr = previous_high;
561 curr < unaligned_new_high;
562 curr += vm_ps) {
563 // Note the use of a write here; originally we tried just a read, but
564 // since the value read was unused, the optimizer removed the read.
565 // If we ever have a concurrent touchahead thread, we'll want to use
566 // a read, to avoid the potential of overwriting data (if a mutator
567 // thread beats the touchahead thread to a page). There are various
568 // ways of making sure this read is not optimized away: for example,
569 // generating the code for a read procedure at runtime.
570 *curr = 0;
571 }
572 }
573
574 _high += bytes;
575 return true;
576 }
577
578 // A page is uncommitted if the contents of the entire page is deemed unusable.
579 // Continue to decrement the high() pointer until it reaches a page boundary
580 // in which case that particular page can now be uncommitted.
581 void VirtualSpace::shrink_by(size_t size) {
582 if (committed_size() < size)
583 fatal("Cannot shrink virtual space to negative size");
584
585 if (special()) {
586 // don't uncommit if the entire space is pinned in memory
587 _high -= size;
588 return;
589 }
590
591 char* unaligned_new_high = high() - size;
592 assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
593
594 // Calculate new unaligned address
595 char* unaligned_upper_new_high =
596 MAX2(unaligned_new_high, middle_high_boundary());
597 char* unaligned_middle_new_high =
598 MAX2(unaligned_new_high, lower_high_boundary());
599 char* unaligned_lower_new_high =
600 MAX2(unaligned_new_high, low_boundary());
601
602 // Align address to region's alignment
603 char* aligned_upper_new_high =
604 (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
605 char* aligned_middle_new_high =
606 (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
607 char* aligned_lower_new_high =
608 (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
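// Note that round_to() rounds the new highs up, so only memory at or above
// the rounded-up new high is uncommitted; a partially used chunk containing
// the new high() stays committed.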
609
610 // Determine which regions need to shrink
611 size_t upper_needs = 0;
612 if (aligned_upper_new_high < upper_high()) {
613 upper_needs =
614 pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
615 }
616 size_t middle_needs = 0;
617 if (aligned_middle_new_high < middle_high()) {
618 middle_needs =
619 pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
620 }
621 size_t lower_needs = 0;
622 if (aligned_lower_new_high < lower_high()) {
623 lower_needs =
624 pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
625 }
626
627 // Check contiguity.
628 assert(middle_high_boundary() <= upper_high() &&
629 upper_high() <= upper_high_boundary(),
630 "high address must be contained within the region");
631 assert(lower_high_boundary() <= middle_high() &&
632 middle_high() <= middle_high_boundary(),
633 "high address must be contained within the region");
634 assert(low_boundary() <= lower_high() &&
635 lower_high() <= lower_high_boundary(),
636 "high address must be contained within the region");
637
638 // Uncommit
639 if (upper_needs > 0) {
640 assert(middle_high_boundary() <= aligned_upper_new_high &&
641 aligned_upper_new_high + upper_needs <= upper_high_boundary(),
642 "must not shrink beyond region");
643 if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
644 debug_only(warning("os::uncommit_memory failed"));
645 return;
646 } else {
647 _upper_high -= upper_needs;
648 }
649 }
650 if (middle_needs > 0) {
651 assert(lower_high_boundary() <= aligned_middle_new_high &&
652 aligned_middle_new_high + middle_needs <= middle_high_boundary(),
653 "must not shrink beyond region");
654 if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
655 debug_only(warning("os::uncommit_memory failed"));
656 return;
657 } else {
658 _middle_high -= middle_needs;
659 }
660 }
661 if (lower_needs > 0) {
662 assert(low_boundary() <= aligned_lower_new_high &&
663 aligned_lower_new_high + lower_needs <= lower_high_boundary(),
664 "must not shrink beyond region");
665 if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
666 debug_only(warning("os::uncommit_memory failed"));
667 return;
668 } else {
669 _lower_high -= lower_needs;
670 }
671 }
672
673 _high -= size;
674 }
675
676 #ifndef PRODUCT
677 void VirtualSpace::check_for_contiguity() {
678 // Check contiguity.
679 assert(low_boundary() <= lower_high() &&
680 lower_high() <= lower_high_boundary(),
681 "high address must be contained within the region");
682 assert(lower_high_boundary() <= middle_high() &&
683 middle_high() <= middle_high_boundary(),
684 "high address must be contained within the region");
685 assert(middle_high_boundary() <= upper_high() &&
686 upper_high() <= upper_high_boundary(),
687 "high address must be contained within the region");
688 assert(low() >= low_boundary(), "low");
689 assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
690 assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
691 assert(high() <= upper_high(), "upper high");
692 }
693
694 void VirtualSpace::print() {
695 tty->print ("Virtual space:");
696 if (special()) tty->print(" (pinned in memory)");
697 tty->cr();
698 tty->print_cr(" - committed: %ld", committed_size());
699 tty->print_cr(" - reserved: %ld", reserved_size());
700 tty->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
701 tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
702 }
703
704 #endif