annotate src/share/vm/memory/space.cpp @ 1142:4ce7240d622c

6914300: ciEnv should export all well known classes
Reviewed-by: kvn, twisti

author   | never
date     | Wed, 06 Jan 2010 14:22:39 -0800
parents  | 0fbdb4381b99
children | c18cbe5936b8 2d127394260e
/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_space.cpp.incl"
void SpaceMemRegionOopsIterClosure::do_oop(oop* p)       { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
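// Background note on the dual overloads above (explanatory, not normative):
// with compressed oops (-XX:+UseCompressedOops) reference fields in the heap
// may be stored either as full-width oop* slots or as 32-bit narrowOop*
// slots, so closures supply both entry points and funnel them into a single
// do_oop_work() template.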

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed.  For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  mr     = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!mr.is_empty()) {
    walk_mem_region(mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
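
// Rough usage sketch (hypothetical driver code, for illustration only): a
// card-table scanner typically hands each run of dirty cards to this closure
// as a MemRegion, e.g.
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(oop_cl, CardTableModRefBS::Precise, boundary);
//   dcto_cl->do_MemRegion(dirty_region);  // visits oops on the dirty cards
//
// where oop_cl, boundary, and dirty_region are assumed to come from the
// caller's collection context.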

DirtyCardToOopClosure* Space::new_dcto_cl(OopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
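
// For illustration (a sketch, not generated output): the FilteringClosure
// instantiation above expands to roughly
//
//   void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,
//                                                      HeapWord* bottom,
//                                                      HeapWord* top,
//                                                      FilteringClosure* cl)
//
// so each oop_iterate(cl) call sees the concrete closure type and can bind
// to its non-virtual fast path instead of dispatching through OopClosure.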

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool Space::is_in(const void* p) const {
  HeapWord* b = block_start_const(p);
  return b != NULL && block_is_obj(b);
}

bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}
#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version is
  // called it is always with regard to a space, so this
  // bounds checking is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size));
  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}
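
// Worked example (illustrative, assuming a 64-bit VM with 8-byte HeapWords):
// if compact_top is 0x1000 and a live 4-word object q is forwarded there,
// q's mark word encodes 0x1000, compact_top advances to 0x1020, and if the
// new top crossed cp->threshold the block offset table is updated so that
// block_start() queries still resolve addresses to object heads once the
// objects actually move.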


bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}
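
// Illustrative walk-through (invented numbers, hedged): with
// allowed_deadspace_words == 10, a 6-word dead run is overwritten with a
// filler object and marked, so compaction can leave its neighbors in place;
// the budget drops to 4. A later 5-word dead run exceeds the remaining
// budget, so the budget is zeroed and that run is squeezed out normally.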

#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}

void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());

      debug_only(prev_q = q);
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));

      q += size;
    } else {
      // q is not a live object, but we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify(bool allow_dirty) const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(OopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
                                          ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}


void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert (block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See comment above (in more general method above) in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}

#ifndef SERIALGC
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
    }                                                                       \
  }

ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // SERIALGC

void ContiguousSpace::oop_iterate(OopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::oop_iterate(MemRegion mr, OopClosure* blk) {
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), top());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(blk);
    return;
  }
  assert(mr.end() <= top(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  // Handle first object specially.
  oop obj = oop(obj_addr);
  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
  obj_addr += obj->oop_iterate(&smr_blk);
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    assert(obj->is_oop(), "expected an oop");
    obj_addr += obj->size();
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      obj->oop_iterate(blk);
    } else {
      // "obj" extends beyond end of region
      obj->oop_iterate(&smr_blk);
      break;
    }
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a contiguous space, object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord * limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
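
// Note on the do/while in the macro above (explanatory sketch): applying blk
// can itself allocate in this space (e.g. promotion during a scavenge bumps
// top()), so the loop re-reads top() and rescans until no object remains
// unvisited. On exit p == top(), and every object in
// [saved_mark_word(), top()) has been visited exactly once before the saved
// mark is advanced.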

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(), "Should be an object start");
    return last;
  }
}
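
// Cost sketch (for orientation): the scan above is linear in the number of
// objects below p, because a plain ContiguousSpace keeps no block offset
// table; locating the object containing the last word of a space with N
// objects touches all N headers. OffsetTableContigSpace answers the same
// query in near-constant time via its _offsets table.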

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  HeapWord* current_top = top();
  assert(p <= current_top, "p is not a block start");
  assert(p == current_top || oop(p)->is_oop(), "p is not a block start");
  if (p < current_top)
    return oop(p)->size();
  else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the new value of the top is returned.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}
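
// Usage sketch (hypothetical caller, sizes in HeapWords): two mutator
// threads may race in par_allocate(); both read the same top(), both
// compute a new_top, and cmpxchg_ptr lets exactly one install its value
// while the loser re-reads top() and retries:
//
//   size_t word_size = align_object_size(some_size_in_words);
//   HeapWord* mem = space->par_allocate(word_size);
//   if (mem == NULL) {
//     // space exhausted: the caller falls back, e.g. requests a GC
//   }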

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // allocate temporary type array decreasing free size with factor 'factor'
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= min_int_array_size) {
    size_t length = (size - min_int_array_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert((int) size == instanceOopDesc::header_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}
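
// Worked example (illustrative, 8-byte HeapWords and 4-byte jints assumed):
// with 100 free words and factor == 4, size becomes 100 - 100/4 = 75 words.
// That comfortably exceeds the int-array header, so the filler is an int[]
// with (75 - header_size) * (8 / 4) elements -- two jint slots per
// remaining HeapWord -- exactly consuming the chosen size.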

void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() is always read before end(): top()
    // can never be greater than end(). If end() were read first and an
    // update of _soft_end occurred in between, top() could grow up to
    // the new end() and the condition 'top_val > end_val' would become
    // true. To enforce this load order, OrderAccess::loadload() is
    // required after the top() read.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the new value of the top is returned.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
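
// Race illustration for the loadload barrier above (hedged sketch): if a
// thread read *soft_end_addr() before top(), and a concurrent update of
// _soft_end landed in between, a stale (small) end paired with a fresh
// (large) top could make pointer_delta() -- an unsigned difference --
// wrap around and report plenty of room past soft_end. Reading top() first
// and fencing with loadload() keeps top_val <= end_val.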


HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
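
// Sketch of the interplay (illustrative granularity): the shared block
// offset array records, per card-sized chunk of the space, how far back the
// enclosing block starts. cross_threshold(start, end) is invoked whenever
// allocation or compaction crosses _offsets.threshold(), so that a later
// block_start(addr) can resolve any addr in [start, end) to its object head
// without scanning the space word by word.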

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}


class VerifyOldOopClosure : public OopClosure {
 public:
  oop  _the_obj;
  bool _allow_dirty;
  void do_oop(oop* p) {
    _the_obj->verify_old_oop(p, _allow_dirty);
  }
  void do_oop(narrowOop* p) {
    _the_obj->verify_old_oop(p, _allow_dirty);
  }
};

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify(bool allow_dirty) const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyOldOopClosure blk;      // Does this do anything?
  blk._allow_dirty = allow_dirty;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find it using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      blk._the_obj = oop(p);
      oop(p)->oop_iterate(&blk);
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

void OffsetTableContigSpace::serialize_block_offset_array_offsets(
                                                      SerializeOopClosure* soc) {
  _offsets.serialize(soc);
}


size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}


size_t ContigPermSpace::allowed_dead_ratio() const {
  return PermMarkSweepDeadRatio;
}