src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children c18cbe5936b8
/*
 * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_objectStartArray.cpp.incl"

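// The ObjectStartArray covers the heap with a table of one byte per
// block of block_size bytes. A clean_block entry means no object starts
// in that block; any other value records where an object starts within
// the block, letting the collector map a heap address back to an
// object boundary.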
void ObjectStartArray::initialize(MemRegion reserved_region) {
  // We rely on using the same block size as the card table.
  assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
  assert((int)block_size <= 512, "block_size must be less than or equal to 512");

  // Calculate how much space must be reserved
  _reserved_region = reserved_region;

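  // One table byte per block: the block count is the covered size in
  // words divided by the block size in words.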
  size_t bytes_to_reserve = reserved_region.word_size() / block_size_in_words;
  assert(bytes_to_reserve > 0, "Sanity");

  bytes_to_reserve =
    align_size_up(bytes_to_reserve, os::vm_allocation_granularity());

  // Do not use large pages for the backing store. The one large-page
  // region will be used for the heap proper.
  ReservedSpace backing_store(bytes_to_reserve);
  if (!backing_store.is_reserved()) {
    vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
  }

  // We do not commit any memory initially
  if (!_virtual_space.initialize(backing_store, 0)) {
    vm_exit_during_initialization("Could not commit space for ObjectStartArray");
  }

  _raw_base = (jbyte*)_virtual_space.low_boundary();
  if (_raw_base == NULL) {
    vm_exit_during_initialization("Could not get raw_base address");
  }

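  // Bias the table base by the reserved region's start so that a covered
  // address p maps to its table byte with a single shift and add:
  // _offset_base[uintptr_t(p) >> block_shift], with no per-access
  // subtraction of the heap base.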
  _offset_base = _raw_base - (size_t(reserved_region.start()) >> block_shift);

  _covered_region.set_start(reserved_region.start());
  _covered_region.set_word_size(0);

  _blocks_region.set_start((HeapWord*)_raw_base);
  _blocks_region.set_word_size(0);
}

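// Grow or shrink the committed part of the block table to match a new
// covered heap region. The covered region may only change size, never
// move: its start must stay at the reserved region's start.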
void ObjectStartArray::set_covered_region(MemRegion mr) {
  assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
  assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");

  HeapWord* low_bound  = mr.start();
  HeapWord* high_bound = mr.end();
  assert((uintptr_t(low_bound)  & (block_size - 1)) == 0, "heap must start at block boundary");
  assert((uintptr_t(high_bound) & (block_size - 1)) == 0, "heap must end at block boundary");

  size_t requested_blocks_size_in_bytes = mr.word_size() / block_size_in_words;

  // Only commit memory in page sized chunks
  requested_blocks_size_in_bytes =
    align_size_up(requested_blocks_size_in_bytes, os::vm_page_size());

  _covered_region = mr;

  size_t current_blocks_size_in_bytes = _blocks_region.byte_size();

  if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
    // Expand
    size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
    if (!_virtual_space.expand_by(expand_by)) {
      vm_exit_out_of_memory(expand_by, "object start array expansion");
    }
    // Clear *only* the newly allocated region
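    // Note: _blocks_region still describes the old committed size here
    // (it is updated below), so end() is the first newly committed byte.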
    memset(_blocks_region.end(), clean_block, expand_by);
  }

  if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
    // Shrink
    size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
    _virtual_space.shrink_by(shrink_by);
  }

  _blocks_region.set_word_size(requested_blocks_size_in_bytes / sizeof(HeapWord));

  assert(requested_blocks_size_in_bytes % sizeof(HeapWord) == 0, "Block table not expanded in word sized increment");
  assert(requested_blocks_size_in_bytes == _blocks_region.byte_size(), "Sanity");
  assert(block_for_addr(low_bound) == &_raw_base[0], "Checking start of map");
  assert(block_for_addr(high_bound-1) <= &_raw_base[_blocks_region.byte_size()-1], "Checking end of map");
}

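// Mark every committed block as clean, i.e. containing no recorded
// object starts.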
void ObjectStartArray::reset() {
  memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
}

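// Scan the block table entries covering [start_addr, end_addr] and
// report whether any of them records an object start.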
bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
                                              HeapWord* end_addr) const {
  assert(start_addr <= end_addr, "range is wrong");
  if (start_addr > end_addr) {
    return false;
  }

  jbyte* start_block = block_for_addr(start_addr);
  jbyte* end_block   = block_for_addr(end_addr);

  for (jbyte* block = start_block; block <= end_block; block++) {
    if (*block != clean_block) {
      return true;
    }
  }
  // No object starts in this slice; verify this using
  // more traditional methods:
  assert(object_start(end_addr - 1) <= start_addr,
         "Oops an object does start in this slice?");
  return false;
}