annotate src/share/vm/memory/space.cpp @ 17937:78bbf4d43a14
8037816: Fix for 8036122 breaks build with Xcode5/clang
8043029: Change 8037816 breaks HS build with older GCC versions which don't support diagnostic pragmas
8043164: Format warning in traceStream.hpp
Summary: Backport of main fix + two corrections, enables clang compilation, turns on format attributes, corrects/mutes warnings
Reviewed-by: kvn, coleenp, iveresov, twisti
author | drchase
date | Thu, 22 May 2014 15:52:41 -0400
parents | 55fb97c4c58d
children | 52b4284cb496 ce8f6bb717c9
rev | line source
0 | 1 /* |
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "classfile/systemDictionary.hpp" | |
27 #include "classfile/vmSymbols.hpp" | |
28 #include "gc_implementation/shared/liveRange.hpp" | |
29 #include "gc_implementation/shared/markSweep.hpp" | |
30 #include "gc_implementation/shared/spaceDecorator.hpp" | |
31 #include "memory/blockOffsetTable.inline.hpp" | |
32 #include "memory/defNewGeneration.hpp" | |
33 #include "memory/genCollectedHeap.hpp" | |
34 #include "memory/space.hpp" | |
35 #include "memory/space.inline.hpp" | |
36 #include "memory/universe.inline.hpp" | |
37 #include "oops/oop.inline.hpp" | |
38 #include "oops/oop.inline2.hpp" | |
39 #include "runtime/java.hpp" | |
40 #include "runtime/safepoint.hpp" | |
41 #include "utilities/copy.hpp" | |
42 #include "utilities/globalDefinitions.hpp" | |
43 #include "utilities/macros.hpp" |
0 | 44 |
45 void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
46 void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
47
48 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
49
0 | 50 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top, |
51 HeapWord* top_obj) { | |
52 if (top_obj != NULL) { | |
53 if (_sp->block_is_obj(top_obj)) { | |
54 if (_precision == CardTableModRefBS::ObjHeadPreciseArray) { | |
55 if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) { | |
56 // An arrayOop is starting on the dirty card - since we do exact | |
57 // store checks for objArrays we are done. | |
58 } else { | |
59 // Otherwise, it is possible that the object starting on the dirty | |
60 // card spans the entire card, and that the store happened on a | |
61 // later card. Figure out where the object ends. | |
62 // Use the block_size() method of the space over which | |
63 // the iteration is being done. That space (e.g. CMS) may have | |
64 // specific requirements on object sizes which will | |
65 // be reflected in the block_size() method. | |
66 top = top_obj + oop(top_obj)->size(); | |
67 } | |
68 } | |
69 } else { | |
70 top = top_obj; | |
71 } | |
72 } else { | |
73 assert(top == _sp->end(), "only case where top_obj == NULL"); | |
74 } | |
75 return top; | |
76 } | |
77 | |
78 void DirtyCardToOopClosure::walk_mem_region(MemRegion mr, | |
79 HeapWord* bottom, | |
80 HeapWord* top) { | |
81 // 1. Blocks may or may not be objects. | |
82 // 2. Even when a block_is_obj(), it may not entirely | |
83 // occupy the block if the block quantum is larger than | |
84 // the object size. | |
85 // We can and should try to optimize by calling the non-MemRegion | |
86 // version of oop_iterate() for all but the extremal objects | |
87 // (for which we need to call the MemRegion version of | |
88 // oop_iterate()). To be done post-beta XXX | |
89 for (; bottom < top; bottom += _sp->block_size(bottom)) { | |
90 // As in the case of contiguous space above, we'd like to | |
91 // just use the value returned by oop_iterate to increment the | |
92 // current pointer; unfortunately, that won't work in CMS because | |
93 // we'd need an interface change (it seems) to have the space | |
94 // "adjust the object size" (for instance pad it up to its | |
95 // block alignment or minimum block size restrictions). XXX | |
96 if (_sp->block_is_obj(bottom) && | |
97 !_sp->obj_allocated_since_save_marks(oop(bottom))) { | |
98 oop(bottom)->oop_iterate(_cl, mr); | |
99 } | |
100 } | |
101 } | |
102 | |
103 // We get called with "mr" representing the dirty region
104 // that we want to process. Because of imprecise marking,
105 // we may need to extend the incoming "mr" to the right,
106 // and scan more. However, because we may already have
107 // scanned some of that extended region, we may need to
108 // trim its right-end back some so we do not scan what
109 // we (or another worker thread) may already have scanned
110 // or be planning to scan.
0 | 111 void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) { |
112 | |
113 // Some collectors need to do special things whenever their dirty | |
114 // cards are processed. For instance, CMS must remember mutator updates | |
115 // (i.e. dirty cards) so as to re-scan mutated objects. | |
116 // Such work can be piggy-backed here on dirty card scanning, so as to make | |
117 // it slightly more efficient than doing a complete non-destructive pre-scan | |
118 // of the card table. | |
119 MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure(); | |
120 if (pCl != NULL) { | |
121 pCl->do_MemRegion(mr); | |
122 } | |
123 | |
124 HeapWord* bottom = mr.start(); | |
125 HeapWord* last = mr.last(); | |
126 HeapWord* top = mr.end(); | |
127 HeapWord* bottom_obj; | |
128 HeapWord* top_obj; | |
129 | |
130 assert(_precision == CardTableModRefBS::ObjHeadPreciseArray || | |
131 _precision == CardTableModRefBS::Precise, | |
132 "Only ones we deal with for now."); | |
133 | |
134 assert(_precision != CardTableModRefBS::ObjHeadPreciseArray || | |
135 _cl->idempotent() || _last_bottom == NULL || |
0 | 136 top <= _last_bottom, |
137 "Not decreasing"); | |
138 NOT_PRODUCT(_last_bottom = mr.start()); | |
139 | |
140 bottom_obj = _sp->block_start(bottom); | |
141 top_obj = _sp->block_start(last); | |
142 | |
143 assert(bottom_obj <= bottom, "just checking"); | |
144 assert(top_obj <= top, "just checking"); | |
145 | |
146 // Given what we think is the top of the memory region and | |
147 // the start of the object at the top, get the actual | |
148 // value of the top. | |
149 top = get_actual_top(top, top_obj); | |
150 | |
151 // If the previous call did some part of this region, don't redo. | |
152 if (_precision == CardTableModRefBS::ObjHeadPreciseArray && | |
153 _min_done != NULL && | |
154 _min_done < top) { | |
155 top = _min_done; | |
156 } | |
157 | |
158 // Top may have been reset, and in fact may be below bottom, | |
159 // e.g. the dirty card region is entirely in a now free object | |
160 // -- something that could happen with a concurrent sweeper. | |
161 bottom = MIN2(bottom, top); | |
162 MemRegion extended_mr = MemRegion(bottom, top); |
0 | 163 assert(bottom <= top && |
164 (_precision != CardTableModRefBS::ObjHeadPreciseArray || | |
165 _min_done == NULL || | |
166 top <= _min_done), | |
167 "overlap!"); | |
168 | |
169 // Walk the region if it is not empty; otherwise there is nothing to do. | |
170 if (!extended_mr.is_empty()) {
171 walk_mem_region(extended_mr, bottom_obj, top);
0 | 172 } |
173 | |
174 // An idempotent closure might be applied in any order, so we don't
175 // record a _min_done for it.
176 if (!_cl->idempotent()) {
177 _min_done = bottom;
178 } else {
179 assert(_min_done == _last_explicit_min_done,
180 "Don't update _min_done for idempotent cl");
181 }
0 | 182 } |
183 | |
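The do_MemRegion() logic above extends the dirty region left to the object that covers its start and trims its right end at _min_done so already-scanned words are not revisited. A minimal illustrative sketch of that adjustment, assuming invented stand-in types (Region, block_start_fn) rather than HotSpot's MemRegion/Space API:

// Illustrative sketch only -- not part of space.cpp. Region and block_start_fn
// are invented stand-ins for MemRegion and Space::block_start().
#include <algorithm>
#include <cstddef>

typedef char* Addr;
struct Region { Addr start; Addr end; };

Region adjust_dirty_region(Region mr, Addr (*block_start_fn)(Addr), Addr min_done) {
  Addr bottom = block_start_fn(mr.start);    // extend left to the covering object
  Addr top    = mr.end;
  if (min_done != NULL && min_done < top) {  // trim right: already scanned past here
    top = min_done;
  }
  bottom = std::min(bottom, top);            // trimmed top may fall below bottom
  Region adjusted = { bottom, top };
  return adjusted;
}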
184 DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl, |
0 | 185 CardTableModRefBS::PrecisionStyle precision, |
186 HeapWord* boundary) { | |
187 return new DirtyCardToOopClosure(this, cl, precision, boundary); | |
188 } | |
189 | |
190 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top, | |
191 HeapWord* top_obj) { | |
192 if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) { | |
193 if (_precision == CardTableModRefBS::ObjHeadPreciseArray) { | |
194 if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) { | |
195 // An arrayOop is starting on the dirty card - since we do exact | |
196 // store checks for objArrays we are done. | |
197 } else { | |
198 // Otherwise, it is possible that the object starting on the dirty | |
199 // card spans the entire card, and that the store happened on a | |
200 // later card. Figure out where the object ends. | |
201 assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(), | |
202 "Block size and object size mismatch"); | |
203 top = top_obj + oop(top_obj)->size(); | |
204 } | |
205 } | |
206 } else { | |
207 top = (_sp->toContiguousSpace())->top(); | |
208 } | |
209 return top; | |
210 } | |
211 | |
212 void Filtering_DCTOC::walk_mem_region(MemRegion mr, | |
213 HeapWord* bottom, | |
214 HeapWord* top) { | |
215 // Note that this assumption won't hold if we have a concurrent | |
216 // collector in this space, which may have freed up objects after | |
217 // they were dirtied and before the stop-the-world GC that is | |
218 // examining cards here. | |
219 assert(bottom < top, "ought to be at least one obj on a dirty card."); | |
220 | |
221 if (_boundary != NULL) { | |
222 // We have a boundary outside of which we don't want to look | |
223 // at objects, so create a filtering closure around the | |
224 // oop closure before walking the region. | |
225 FilteringClosure filter(_boundary, _cl); | |
226 walk_mem_region_with_cl(mr, bottom, top, &filter); | |
227 } else { | |
228 // No boundary, simply walk the heap with the oop closure. | |
229 walk_mem_region_with_cl(mr, bottom, top, _cl); | |
230 } | |
231 | |
232 } | |
233 | |
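Filtering_DCTOC::walk_mem_region() above wraps the oop closure in a FilteringClosure when a boundary is set, so only references below the boundary reach the real closure. A hedged sketch of that wrapping idea with invented types (OopVisitor, BoundaryFilter), not the actual HotSpot closure hierarchy:

// Illustrative sketch only: forward a pointer to the wrapped closure only when
// the referenced object lies below the boundary. Types are invented.
#include <cstddef>

struct OopVisitor {
  virtual void do_oop(char** p) = 0;
  virtual ~OopVisitor() {}
};

struct BoundaryFilter : public OopVisitor {
  char*       _boundary;
  OopVisitor* _inner;
  BoundaryFilter(char* boundary, OopVisitor* inner)
    : _boundary(boundary), _inner(inner) {}
  virtual void do_oop(char** p) {
    if (*p != NULL && *p < _boundary) {  // only objects below the boundary
      _inner->do_oop(p);
    }
  }
};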
234 // We must replicate this so that the static type of "FilteringClosure" | |
235 // (see above) is apparent at the oop_iterate calls. | |
236 #define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \ | |
237 void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \ | |
238 HeapWord* bottom, \ | |
239 HeapWord* top, \ | |
240 ClosureType* cl) { \ | |
241 bottom += oop(bottom)->oop_iterate(cl, mr); \ | |
242 if (bottom < top) { \ | |
243 HeapWord* next_obj = bottom + oop(bottom)->size(); \ | |
244 while (next_obj < top) { \ | |
245 /* Bottom lies entirely below top, so we can call the */ \ | |
246 /* non-memRegion version of oop_iterate below. */ \ | |
247 oop(bottom)->oop_iterate(cl); \ | |
248 bottom = next_obj; \ | |
249 next_obj = bottom + oop(bottom)->size(); \ | |
250 } \ | |
251 /* Last object. */ \ | |
252 oop(bottom)->oop_iterate(cl, mr); \ | |
253 } \ | |
254 } | |
255 | |
256 // (There are only two of these, rather than N, because the split is due | |
257 // only to the introduction of the FilteringClosure, a local part of the | |
258 // impl of this abstraction.) | |
259 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure) |
0 | 260 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure) |
261 | |
262 DirtyCardToOopClosure* | |
263 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl, |
0 | 264 CardTableModRefBS::PrecisionStyle precision, |
265 HeapWord* boundary) { | |
266 return new ContiguousSpaceDCTOC(this, cl, precision, boundary); | |
267 } | |
268 | |
269 void Space::initialize(MemRegion mr,
270 bool clear_space,
271 bool mangle_space) {
0 | 272 HeapWord* bottom = mr.start(); |
273 HeapWord* end = mr.end(); | |
274 assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end), | |
275 "invalid space boundaries"); | |
276 set_bottom(bottom); | |
277 set_end(end); | |
278 if (clear_space) clear(mangle_space);
279 }
280
281 void Space::clear(bool mangle_space) {
282 if (ZapUnusedHeapArea && mangle_space) {
283 mangle_unused_area();
284 }
0 | 285 } |
286 | |
356 | 287 ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL), |
288 _concurrent_iteration_safe_limit(NULL) { | |
289 _mangler = new GenSpaceMangler(this); |
0 | 290 } |
291 | |
292 ContiguousSpace::~ContiguousSpace() {
293 delete _mangler;
294 }
295
296 void ContiguousSpace::initialize(MemRegion mr,
297 bool clear_space,
298 bool mangle_space)
0 | 299 { |
300 CompactibleSpace::initialize(mr, clear_space, mangle_space);
301 set_concurrent_iteration_safe_limit(top());
0 | 302 } |
303 | |
304 void ContiguousSpace::clear(bool mangle_space) { |
0 | 305 set_top(bottom()); |
306 set_saved_mark(); | |
356 | 307 CompactibleSpace::clear(mangle_space); |
0 | 308 } |
309 | |
310 bool ContiguousSpace::is_in(const void* p) const { | |
311 return _bottom <= p && p < _top; | |
312 } | |
313 | |
314 bool ContiguousSpace::is_free_block(const HeapWord* p) const { | |
315 return p >= _top; | |
316 } | |
317 | |
318 void OffsetTableContigSpace::clear(bool mangle_space) {
319 ContiguousSpace::clear(mangle_space);
0 | 320 _offsets.initialize_threshold(); |
321 } | |
322 | |
323 void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) { | |
324 Space::set_bottom(new_bottom); | |
325 _offsets.set_bottom(new_bottom); | |
326 } | |
327 | |
328 void OffsetTableContigSpace::set_end(HeapWord* new_end) { | |
329 // Space should not advertise an increase in size | |
330 // until after the underlying offset table has been enlarged. | |
331 _offsets.resize(pointer_delta(new_end, bottom())); | |
332 Space::set_end(new_end); | |
333 } | |
334 | |
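The comment in set_end() above states the ordering rule: grow the offset table before advertising the larger space, so no caller ever observes a space bigger than the table that describes it. A small illustrative sketch of that rule with invented names (TableBackedSpace), not the real BlockOffsetArray API:

// Illustrative sketch only: enlarge the auxiliary table first, then publish
// the new end.
#include <cstddef>
#include <vector>

struct TableBackedSpace {
  std::vector<unsigned char> offsets;   // stand-in for the block offset table
  char* bottom;
  char* end;

  void set_end_grown(char* new_end) {
    offsets.resize((size_t)(new_end - bottom));  // 1. resize the table
    end = new_end;                               // 2. only then advertise the growth
  }
};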
335 #ifndef PRODUCT
336
337 void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
338 mangler()->set_top_for_allocations(v);
339 }
340 void ContiguousSpace::set_top_for_allocations() {
341 mangler()->set_top_for_allocations(top());
342 }
343 void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
344 mangler()->check_mangled_unused_area(limit);
345 }
346
347 void ContiguousSpace::check_mangled_unused_area_complete() {
348 mangler()->check_mangled_unused_area_complete();
0 | 349 } |
350 | |
351 // Mangle only the unused space that has not previously
352 // been mangled and that has not been allocated since being
353 // mangled.
354 void ContiguousSpace::mangle_unused_area() {
355 mangler()->mangle_unused_area();
356 }
357 void ContiguousSpace::mangle_unused_area_complete() {
358 mangler()->mangle_unused_area_complete();
0 | 359 }
360 void ContiguousSpace::mangle_region(MemRegion mr) {
361 // Although this method uses SpaceMangler::mangle_region() which
362 // is not specific to a space, when the ContiguousSpace version
363 // is called, it is always with regard to a space and this
364 // bounds checking is appropriate.
365 MemRegion space_mr(bottom(), end());
366 assert(space_mr.contains(mr), "Mangling outside space");
367 SpaceMangler::mangle_region(mr);
368 }
369 #endif // NOT_PRODUCT
0 | 370 |
371 void CompactibleSpace::initialize(MemRegion mr,
372 bool clear_space,
373 bool mangle_space) {
374 Space::initialize(mr, clear_space, mangle_space);
356 | 375 set_compaction_top(bottom()); |
376 _next_compaction_space = NULL; | |
377 } | |
378 | |
379 void CompactibleSpace::clear(bool mangle_space) { | |
380 Space::clear(mangle_space); | |
0 | 381 _compaction_top = bottom(); |
382 } | |
383 | |
384 HeapWord* CompactibleSpace::forward(oop q, size_t size, | |
385 CompactPoint* cp, HeapWord* compact_top) { | |
386 // q is alive | |
387 // First check if we should switch compaction space | |
388 assert(this == cp->space, "'this' should be current compaction space."); | |
389 size_t compaction_max_size = pointer_delta(end(), compact_top); | |
390 while (size > compaction_max_size) { | |
391 // switch to next compaction space | |
392 cp->space->set_compaction_top(compact_top); | |
393 cp->space = cp->space->next_compaction_space(); | |
394 if (cp->space == NULL) { | |
395 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen); | |
396 assert(cp->gen != NULL, "compaction must succeed"); | |
397 cp->space = cp->gen->first_compaction_space(); | |
398 assert(cp->space != NULL, "generation must have a first compaction space"); | |
399 } | |
400 compact_top = cp->space->bottom(); | |
401 cp->space->set_compaction_top(compact_top); | |
402 cp->threshold = cp->space->initialize_threshold(); | |
403 compaction_max_size = pointer_delta(cp->space->end(), compact_top); | |
404 } | |
405 | |
406 // store the forwarding pointer into the mark word | |
407 if ((HeapWord*)q != compact_top) { | |
408 q->forward_to(oop(compact_top)); | |
409 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); | |
410 } else { | |
411 // if the object isn't moving we can just set the mark to the default | |
412 // mark and handle it specially later on. | |
413 q->init_mark(); | |
414 assert(q->forwardee() == NULL, "should be forwarded to NULL"); | |
415 } | |
416 | |
417 compact_top += size; | |
418 | |
419 // we need to update the offset table so that the beginnings of objects can be | |
420 // found during scavenge. Note that we are updating the offset table based on | |
421 // where the object will be once the compaction phase finishes. | |
422 if (compact_top > cp->threshold) | |
423 cp->threshold = | |
424 cp->space->cross_threshold(compact_top - size, compact_top); | |
425 return compact_top; | |
426 } | |
427 | |
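forward() above installs a forwarding pointer for each live object and bumps the compaction point. A simplified editorial sketch of that single step, with an invented LiveObj record standing in for the mark-word encoding used by the real code:

// Illustrative sketch only: record where a live object will move and advance
// the compaction point. In space.cpp the forwardee is stored in the mark word.
#include <cstddef>

struct LiveObj { char* addr; size_t size_bytes; char* forwardee; };

char* forward_one(LiveObj& obj, char* compact_top) {
  if (obj.addr != compact_top) {
    obj.forwardee = compact_top;   // object will move to compact_top
  } else {
    obj.forwardee = NULL;          // object stays put; handled specially later
  }
  return compact_top + obj.size_bytes;
}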
428 | |
429 bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words, | |
430 HeapWord* q, size_t deadlength) { | |
431 if (allowed_deadspace_words >= deadlength) { | |
432 allowed_deadspace_words -= deadlength; | |
433 CollectedHeap::fill_with_object(q, deadlength);
434 oop(q)->set_mark(oop(q)->mark()->set_marked());
435 assert((int) deadlength == oop(q)->size(), "bad filler object size");
0 | 436 // Recall that we required "q == compaction_top". |
437 return true; | |
438 } else { | |
439 allowed_deadspace_words = 0; | |
440 return false; | |
441 } | |
442 } | |
443 | |
444 #define block_is_always_obj(q) true | |
445 #define obj_size(q) oop(q)->size() | |
446 #define adjust_obj_size(s) s | |
447 | |
448 void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) { | |
449 SCAN_AND_FORWARD(cp, end, block_is_obj, block_size); | |
450 } | |
451 | |
452 // Faster object search. | |
453 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { | |
454 SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); | |
455 } | |
456 | |
457 void Space::adjust_pointers() { | |
458 // adjust all the interior pointers to point at the new locations of objects | |
459 // Used by MarkSweep::mark_sweep_phase3() | |
460 | |
461 // First check to see if there is any work to be done. | |
462 if (used() == 0) { | |
463 return; // Nothing to do. | |
464 } | |
465 | |
466 // Otherwise... | |
467 HeapWord* q = bottom(); | |
468 HeapWord* t = end(); | |
469 | |
470 debug_only(HeapWord* prev_q = NULL); | |
471 while (q < t) { | |
472 if (oop(q)->is_gc_marked()) { | |
473 // q is alive | |
474 | |
475 // point all the oops to the new location | |
476 size_t size = oop(q)->adjust_pointers(); | |
477 | |
478 debug_only(prev_q = q); | |
479 | |
480 q += size; | |
481 } else { | |
482 // q is not a live object. But we're not in a compactible space, | |
483 // So we don't have live ranges. | |
484 debug_only(prev_q = q); | |
485 q += block_size(q); | |
486 assert(q > prev_q, "we should be moving forward through memory"); | |
487 } | |
488 } | |
489 assert(q == t, "just checking"); | |
490 } | |
491 | |
492 void CompactibleSpace::adjust_pointers() { | |
493 // Check first if there is any work to do. | |
494 if (used() == 0) { | |
495 return; // Nothing to do. | |
496 } | |
497 | |
498 SCAN_AND_ADJUST_POINTERS(adjust_obj_size); | |
499 } | |
500 | |
501 void CompactibleSpace::compact() { | |
502 SCAN_AND_COMPACT(obj_size); | |
503 } | |
504 | |
505 void Space::print_short() const { print_short_on(tty); } | |
506 | |
507 void Space::print_short_on(outputStream* st) const { | |
508 st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K, | |
509 (int) ((double) used() * 100 / capacity())); | |
510 } | |
511 | |
512 void Space::print() const { print_on(tty); } | |
513 | |
514 void Space::print_on(outputStream* st) const { | |
515 print_short_on(st); | |
516 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")", | |
517 bottom(), end()); | |
518 } | |
519 | |
520 void ContiguousSpace::print_on(outputStream* st) const { | |
521 print_short_on(st); | |
522 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", | |
523 bottom(), top(), end()); | |
524 } | |
525 | |
526 void OffsetTableContigSpace::print_on(outputStream* st) const { | |
527 print_short_on(st); | |
528 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " | |
529 INTPTR_FORMAT ", " INTPTR_FORMAT ")", | |
530 bottom(), top(), _offsets.threshold(), end()); | |
531 } | |
532 | |
6008 | 533 void ContiguousSpace::verify() const { |
0 | 534 HeapWord* p = bottom(); |
535 HeapWord* t = top(); | |
536 HeapWord* prev_p = NULL; | |
537 while (p < t) { | |
538 oop(p)->verify(); | |
539 prev_p = p; | |
540 p += oop(p)->size(); | |
541 } | |
542 guarantee(p == top(), "end of last object must match end of space"); | |
543 if (top() != end()) { | |
544 guarantee(top() == block_start_const(end()-1) &&
545 top() == block_start_const(top()),
0 | 546 "top should be start of unallocated block, if it exists"); |
547 } | |
548 } | |
549 | |
550 void Space::oop_iterate(ExtendedOopClosure* blk) { |
0 | 551 ObjectToOopClosure blk2(blk); |
552 object_iterate(&blk2); | |
553 } | |
554 | |
555 HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) { | |
556 guarantee(false, "NYI"); | |
557 return bottom(); | |
558 } | |
559 | |
560 HeapWord* Space::object_iterate_careful_m(MemRegion mr, | |
561 ObjectClosureCareful* cl) { | |
562 guarantee(false, "NYI"); | |
563 return bottom(); | |
564 } | |
565 | |
566 | |
567 void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) { | |
568 assert(!mr.is_empty(), "Should be non-empty"); | |
569 // We use MemRegion(bottom(), end()) rather than used_region() below | |
570 // because the two are not necessarily equal for some kinds of | |
571 // spaces, in particular, certain kinds of free list spaces. | |
572 // We could use the more complicated but more precise: | |
573 // MemRegion(used_region().start(), round_to(used_region().end(), CardSize)) | |
574 // but the slight imprecision seems acceptable in the assertion check. | |
575 assert(MemRegion(bottom(), end()).contains(mr), | |
576 "Should be within used space"); | |
577 HeapWord* prev = cl->previous(); // max address from last time | |
578 if (prev >= mr.end()) { // nothing to do | |
579 return; | |
580 } | |
581 // This assert will not work when we go from cms space to perm | |
582 // space, and use same closure. Easy fix deferred for later. XXX YSR | |
583 // assert(prev == NULL || contains(prev), "Should be within space"); | |
584 | |
585 bool last_was_obj_array = false; | |
586 HeapWord *blk_start_addr, *region_start_addr; | |
587 if (prev > mr.start()) { | |
588 region_start_addr = prev; | |
589 blk_start_addr = prev; | |
590 // The previous invocation may have pushed "prev" beyond the
591 // last allocated block yet there may still be blocks
592 // in this region due to a particular coalescing policy.
593 // Relax the assertion so that the case where the unallocated
594 // block is maintained and "prev" is beyond the unallocated
595 // block does not cause the assertion to fire.
596 assert((BlockOffsetArrayUseUnallocatedBlock &&
597 (!is_in(prev))) ||
598 (blk_start_addr == block_start(region_start_addr)), "invariant");
0 | 599 } else { |
600 region_start_addr = mr.start(); | |
601 blk_start_addr = block_start(region_start_addr); | |
602 } | |
603 HeapWord* region_end_addr = mr.end(); | |
604 MemRegion derived_mr(region_start_addr, region_end_addr); | |
605 while (blk_start_addr < region_end_addr) { | |
606 const size_t size = block_size(blk_start_addr); | |
607 if (block_is_obj(blk_start_addr)) { | |
608 last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr); | |
609 } else { | |
610 last_was_obj_array = false; | |
611 } | |
612 blk_start_addr += size; | |
613 } | |
614 if (!last_was_obj_array) { | |
615 assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()), | |
616 "Should be within (closed) used space"); | |
617 assert(blk_start_addr > prev, "Invariant"); | |
618 cl->set_previous(blk_start_addr); // min address for next time | |
619 } | |
620 } | |
621 | |
622 bool Space::obj_is_alive(const HeapWord* p) const { | |
623 assert (block_is_obj(p), "The address should point to an object"); | |
624 return true; | |
625 } | |
626 | |
627 void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) { | |
628 assert(!mr.is_empty(), "Should be non-empty"); | |
629 assert(used_region().contains(mr), "Should be within used space"); | |
630 HeapWord* prev = cl->previous(); // max address from last time | |
631 if (prev >= mr.end()) { // nothing to do | |
632 return; | |
633 } | |
634 // See comment above (in more general method above) in case you | |
635 // happen to use this method. | |
636 assert(prev == NULL || is_in_reserved(prev), "Should be within space"); | |
637 | |
638 bool last_was_obj_array = false; | |
639 HeapWord *obj_start_addr, *region_start_addr; | |
640 if (prev > mr.start()) { | |
641 region_start_addr = prev; | |
642 obj_start_addr = prev; | |
643 assert(obj_start_addr == block_start(region_start_addr), "invariant"); | |
644 } else { | |
645 region_start_addr = mr.start(); | |
646 obj_start_addr = block_start(region_start_addr); | |
647 } | |
648 HeapWord* region_end_addr = mr.end(); | |
649 MemRegion derived_mr(region_start_addr, region_end_addr); | |
650 while (obj_start_addr < region_end_addr) { | |
651 oop obj = oop(obj_start_addr); | |
652 const size_t size = obj->size(); | |
653 last_was_obj_array = cl->do_object_bm(obj, derived_mr); | |
654 obj_start_addr += size; | |
655 } | |
656 if (!last_was_obj_array) { | |
657 assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()), | |
658 "Should be within (closed) used space"); | |
659 assert(obj_start_addr > prev, "Invariant"); | |
660 cl->set_previous(obj_start_addr); // min address for next time | |
661 } | |
662 } | |
663 | |
664 #if INCLUDE_ALL_GCS |
0 | 665 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ |
666 \ | |
667 void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\ | |
668 HeapWord* obj_addr = mr.start(); \ | |
669 HeapWord* t = mr.end(); \ | |
670 while (obj_addr < t) { \ | |
671 assert(oop(obj_addr)->is_oop(), "Should be an oop"); \ | |
672 obj_addr += oop(obj_addr)->oop_iterate(blk); \ | |
673 } \ | |
674 } | |
675 | |
676 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN) | |
677 | |
678 #undef ContigSpace_PAR_OOP_ITERATE_DEFN | |
679 #endif // INCLUDE_ALL_GCS |
0 | 680 |
681 void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) { |
0 | 682 if (is_empty()) return; |
683 HeapWord* obj_addr = bottom(); | |
684 HeapWord* t = top(); | |
685 // Could call objects iterate, but this is easier. | |
686 while (obj_addr < t) { | |
687 obj_addr += oop(obj_addr)->oop_iterate(blk); | |
688 } | |
689 } | |
690 | |
691 void ContiguousSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* blk) { |
0 | 692 if (is_empty()) { |
693 return; | |
694 } | |
695 MemRegion cur = MemRegion(bottom(), top()); | |
696 mr = mr.intersection(cur); | |
697 if (mr.is_empty()) { | |
698 return; | |
699 } | |
700 if (mr.equals(cur)) { | |
701 oop_iterate(blk); | |
702 return; | |
703 } | |
704 assert(mr.end() <= top(), "just took an intersection above"); | |
705 HeapWord* obj_addr = block_start(mr.start()); | |
706 HeapWord* t = mr.end(); | |
707 | |
708 // Handle first object specially. | |
709 oop obj = oop(obj_addr); | |
710 SpaceMemRegionOopsIterClosure smr_blk(blk, mr); | |
711 obj_addr += obj->oop_iterate(&smr_blk); | |
712 while (obj_addr < t) { | |
713 oop obj = oop(obj_addr); | |
714 assert(obj->is_oop(), "expected an oop"); | |
715 obj_addr += obj->size(); | |
716 // If "obj_addr" is not greater than top, then the | |
717 // entire object "obj" is within the region. | |
718 if (obj_addr <= t) { | |
719 obj->oop_iterate(blk); | |
720 } else { | |
721 // "obj" extends beyond end of region | |
722 obj->oop_iterate(&smr_blk); | |
723 break; | |
724 } | |
725 }; | |
726 } | |
727 | |
728 void ContiguousSpace::object_iterate(ObjectClosure* blk) { | |
729 if (is_empty()) return; | |
730 WaterMark bm = bottom_mark(); | |
731 object_iterate_from(bm, blk); | |
732 } | |
733 | |
734 // For a contiguous space object_iterate() and safe_object_iterate()
735 // are the same.
736 void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
737 object_iterate(blk);
738 }
739
0 | 740 void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) { |
741 assert(mark.space() == this, "Mark does not match space"); | |
742 HeapWord* p = mark.point(); | |
743 while (p < top()) { | |
744 blk->do_object(oop(p)); | |
745 p += oop(p)->size(); | |
746 } | |
747 } | |
748 | |
749 HeapWord* | |
750 ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) { | |
751 HeapWord * limit = concurrent_iteration_safe_limit(); | |
752 assert(limit <= top(), "sanity check"); | |
753 for (HeapWord* p = bottom(); p < limit;) { | |
754 size_t size = blk->do_object_careful(oop(p)); | |
755 if (size == 0) { | |
756 return p; // failed at p | |
757 } else { | |
758 p += size; | |
759 } | |
760 } | |
761 return NULL; // all done | |
762 } | |
763 | |
764 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
765 \ | |
766 void ContiguousSpace:: \ | |
767 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ | |
768 HeapWord* t; \ | |
769 HeapWord* p = saved_mark_word(); \ | |
770 assert(p != NULL, "expected saved mark"); \ | |
771 \ | |
772 const intx interval = PrefetchScanIntervalInBytes; \ | |
773 do { \ | |
774 t = top(); \ | |
775 while (p < t) { \ | |
776 Prefetch::write(p, interval); \ | |
777 debug_only(HeapWord* prev = p); \ | |
778 oop m = oop(p); \ | |
779 p += m->oop_iterate(blk); \ | |
780 } \ | |
781 } while (t < top()); \ | |
782 \ | |
783 set_saved_mark_word(p); \ | |
784 } | |
785 | |
786 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN) | |
787 | |
788 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN | |
789 | |
790 // Very general, slow implementation. | |
791 HeapWord* ContiguousSpace::block_start_const(const void* p) const {
792 assert(MemRegion(bottom(), end()).contains(p),
793 err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
794 p, bottom(), end()));
0 | 795 if (p >= top()) { |
796 return top(); | |
797 } else { | |
798 HeapWord* last = bottom(); | |
799 HeapWord* cur = last; | |
800 while (cur <= p) { | |
801 last = cur; | |
802 cur += oop(cur)->size(); | |
803 } | |
804 assert(oop(last)->is_oop(),
805 err_msg(PTR_FORMAT " should be an object start", last));
0 | 806 return last; |
807 } | |
808 } | |
809 | |
810 size_t ContiguousSpace::block_size(const HeapWord* p) const { | |
811 assert(MemRegion(bottom(), end()).contains(p),
812 err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
813 p, bottom(), end()));
0 | 814 HeapWord* current_top = top(); |
815 assert(p <= current_top,
816 err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
817 p, current_top));
818 assert(p == current_top || oop(p)->is_oop(),
819 err_msg("p (" PTR_FORMAT ") is not a block start - "
820 "current_top: " PTR_FORMAT ", is_oop: %s",
821 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
822 if (p < current_top) {
0 | 823 return oop(p)->size();
824 } else {
0 | 825 assert(p == current_top, "just checking"); |
826 return pointer_delta(end(), (HeapWord*) p); | |
827 } | |
828 } | |
829 | |
830 // This version requires locking. | |
831 inline HeapWord* ContiguousSpace::allocate_impl(size_t size, | |
832 HeapWord* const end_value) { | |
833 // In G1 there are places where a GC worker can allocate into a
834 // region using this serial allocation code without being prone to a
835 // race with other GC workers (we ensure that no other GC worker can
836 // access the same region at the same time). So the assert below is
837 // too strong in the case of G1.
0 | 838 assert(Heap_lock->owned_by_self() ||
839 (SafepointSynchronize::is_at_safepoint() &&
840 (Thread::current()->is_VM_thread() || UseG1GC)),
0 | 841 "not locked"); |
842 HeapWord* obj = top(); | |
843 if (pointer_delta(end_value, obj) >= size) { | |
844 HeapWord* new_top = obj + size; | |
845 set_top(new_top); | |
846 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); | |
847 return obj; | |
848 } else { | |
849 return NULL; | |
850 } | |
851 } | |
852 | |
853 // This version is lock-free. | |
854 inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size, | |
855 HeapWord* const end_value) { | |
856 do { | |
857 HeapWord* obj = top(); | |
858 if (pointer_delta(end_value, obj) >= size) { | |
859 HeapWord* new_top = obj + size; | |
860 HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); | |
861 // result can be one of two: | |
862 // the old top value: the exchange succeeded | |
863 // otherwise: the new value of the top is returned. | |
864 if (result == obj) { | |
865 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); | |
866 return obj; | |
867 } | |
868 } else { | |
869 return NULL; | |
870 } | |
871 } while (true); | |
872 } | |
873 | |
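par_allocate_impl() above is lock-free bump-the-pointer allocation: read top, check the remaining room, CAS in the new top, and retry on contention. A hedged sketch of the same retry loop written against std::atomic instead of the VM's Atomic::cmpxchg_ptr; names such as ArenaLite are invented for the example:

// Illustrative sketch only: the compare-and-swap bump allocation pattern.
#include <atomic>
#include <cstddef>

struct ArenaLite {
  std::atomic<char*> top;
  char*              end;

  char* par_allocate(size_t bytes) {
    char* obj = top.load();
    do {
      if ((size_t)(end - obj) < bytes) {
        return NULL;                                   // arena exhausted
      }
      // On failure, compare_exchange reloads obj with the current top and
      // the loop retries, exactly like the cmpxchg loop above.
    } while (!top.compare_exchange_weak(obj, obj + bytes));
    return obj;                                        // old top = start of block
  }
};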
874 // Requires locking. | |
875 HeapWord* ContiguousSpace::allocate(size_t size) { | |
876 return allocate_impl(size, end()); | |
877 } | |
878 | |
879 // Lock-free. | |
880 HeapWord* ContiguousSpace::par_allocate(size_t size) { | |
881 return par_allocate_impl(size, end()); | |
882 } | |
883 | |
884 void ContiguousSpace::allocate_temporary_filler(int factor) { | |
885 // allocate temporary type array decreasing free size with factor 'factor' | |
886 assert(factor >= 0, "just checking"); | |
887 size_t size = pointer_delta(end(), top()); | |
888 | |
889 // if space is full, return | |
890 if (size == 0) return; | |
891 | |
892 if (factor > 0) { | |
893 size -= size/factor; | |
894 } | |
895 size = align_object_size(size); | |
896 | |
897 const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
898 if (size >= (size_t)align_object_size(array_header_size)) {
899 size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
0 | 900 // allocate uninitialized int array |
901 typeArrayOop t = (typeArrayOop) allocate(size); | |
902 assert(t != NULL, "allocation should succeed"); | |
903 t->set_mark(markOopDesc::prototype()); | |
904 t->set_klass(Universe::intArrayKlassObj()); | |
905 t->set_length((int)length); | |
906 } else { | |
907 assert(size == CollectedHeap::min_fill_size(), |
0 | 908 "size for smallest fake object doesn't match"); |
909 instanceOop obj = (instanceOop) allocate(size); | |
910 obj->set_mark(markOopDesc::prototype()); | |
911 obj->set_klass_gap(0); |
1142 | 912 obj->set_klass(SystemDictionary::Object_klass()); |
0 | 913 } |
914 } | |
915 | |
916 void EdenSpace::clear(bool mangle_space) {
917 ContiguousSpace::clear(mangle_space);
0 | 918 set_soft_end(end()); |
919 } | |
920 | |
921 // Requires locking. | |
922 HeapWord* EdenSpace::allocate(size_t size) { | |
923 return allocate_impl(size, soft_end()); | |
924 } | |
925 | |
926 // Lock-free. | |
927 HeapWord* EdenSpace::par_allocate(size_t size) { | |
928 return par_allocate_impl(size, soft_end()); | |
929 } | |
930 | |
931 HeapWord* ConcEdenSpace::par_allocate(size_t size) | |
932 { | |
933 do { | |
934 // The invariant is top() should be read before end() because | |
935 // top() can't be greater than end(), so if an update of _soft_end | |
936 // occurs between 'end_val = end();' and 'top_val = top();' top() | |
937 // also can grow up to the new end() and the condition | |
938 // 'top_val > end_val' is true. To ensure the loading order | |
939 // OrderAccess::loadload() is required after top() read. | |
940 HeapWord* obj = top(); | |
941 OrderAccess::loadload(); | |
942 if (pointer_delta(*soft_end_addr(), obj) >= size) { | |
943 HeapWord* new_top = obj + size; | |
944 HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); | |
945 // result can be one of two: | |
946 // the old top value: the exchange succeeded | |
947 // otherwise: the new value of the top is returned. | |
948 if (result == obj) { | |
949 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); | |
950 return obj; | |
951 } | |
952 } else { | |
953 return NULL; | |
954 } | |
955 } while (true); | |
956 } | |
957 | |
958 | |
959 HeapWord* OffsetTableContigSpace::initialize_threshold() { | |
960 return _offsets.initialize_threshold(); | |
961 } | |
962 | |
963 HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) { | |
964 _offsets.alloc_block(start, end); | |
965 return _offsets.threshold(); | |
966 } | |
967 | |
968 OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray, | |
969 MemRegion mr) : | |
970 _offsets(sharedOffsetArray, mr), | |
971 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true) | |
972 { | |
973 _offsets.set_contig_space(this); | |
974 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle); |
0 | 975 } |
976 | |
977 #define OBJ_SAMPLE_INTERVAL 0 | |
978 #define BLOCK_SAMPLE_INTERVAL 100 | |
979 | |
6008 | 980 void OffsetTableContigSpace::verify() const { |
0 | 981 HeapWord* p = bottom(); |
982 HeapWord* prev_p = NULL; | |
983 int objs = 0; | |
984 int blocks = 0; | |
985 | |
986 if (VerifyObjectStartArray) { | |
987 _offsets.verify(); | |
988 } | |
989 | |
990 while (p < top()) { | |
991 size_t size = oop(p)->size(); | |
992 // For a sampling of objects in the space, find it using the | |
993 // block offset table. | |
994 if (blocks == BLOCK_SAMPLE_INTERVAL) { | |
995 guarantee(p == block_start_const(p + (size/2)),
996 "check offset computation");
0 | 997 blocks = 0; |
998 } else { | |
999 blocks++; | |
1000 } | |
1001 | |
1002 if (objs == OBJ_SAMPLE_INTERVAL) { | |
1003 oop(p)->verify(); | |
1004 objs = 0; | |
1005 } else { | |
1006 objs++; | |
1007 } | |
1008 prev_p = p; | |
1009 p += size; | |
1010 } | |
1011 guarantee(p == top(), "end of last object must match end of space"); | |
1012 } | |
1013 | |
1014 | |
438 | 1015 size_t TenuredSpace::allowed_dead_ratio() const { |
0 | 1016 return MarkSweepDeadRatio; |
1017 } |