comparison src/share/vm/memory/space.inline.hpp @ 20198:c49dcaf78a65

8042737: Introduce umbrella header prefetch.inline.hpp
Reviewed-by: twisti, stefank
author goetz
date Thu, 08 May 2014 15:37:17 +0200
parents b9a9ed0f8eeb
children
comparison 20197:ce8f6bb717c9 with 20198:c49dcaf78a65
#define SHARE_VM_MEMORY_SPACE_INLINE_HPP

#include "gc_interface/collectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
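
// The SCAN_AND_* compaction macros below call Prefetch::read() and
// Prefetch::write(), which is why this file now includes the umbrella
// header runtime/prefetch.inline.hpp introduced by this change (8042737).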

inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
  /* Compute the new addresses for the live objects and store them in the    \
   * mark word.  Used by MarkSweep::mark_sweep_phase2().                     \
   */                                                                        \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
                                                                             \
  /* We're sure to be here before any objects are compacted into this        \
   * space, so this is a good time to initialize this:                       \
   */                                                                        \
  set_compaction_top(bottom());                                              \
                                                                             \
  if (cp->space == NULL) {                                                   \
    assert(cp->gen != NULL, "need a generation");                            \
    assert(cp->threshold == NULL, "just checking");                          \
    assert(cp->gen->first_compaction_space() == this, "just checking");      \
    cp->space = cp->gen->first_compaction_space();                           \
    compact_top = cp->space->bottom();                                       \
    cp->space->set_compaction_top(compact_top);                              \
    cp->threshold = cp->space->initialize_threshold();                       \
  } else {                                                                   \
    compact_top = cp->space->compaction_top();                               \
  }                                                                          \
                                                                             \
  /* We allow some amount of garbage towards the bottom of the space, so     \
   * we don't start compacting before there is a significant gain to be made.\
   * Occasionally, we want to ensure a full compaction, which is determined  \
   * by the MarkSweepAlwaysCompactCount parameter.                           \
   */                                                                        \
  uint invocations = MarkSweep::total_invocations();                         \
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
                                                                             \
  size_t allowed_deadspace = 0;                                              \
  if (skip_dead) {                                                           \
    const size_t ratio = allowed_dead_ratio();                               \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
  }                                                                          \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = scan_limit();                                                \
                                                                             \
  HeapWord* end_of_live = q;    /* One byte beyond the last byte of the last \
                                   live object. */                           \
  HeapWord* first_dead = end(); /* The first dead object. */                 \
  LiveRange* liveRange = NULL;  /* The current live range, recorded in the   \
                                   first header of preceding free area. */   \
  _first_dead = first_dead;                                                  \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  while (q < t) {                                                            \
    assert(!block_is_obj(q) ||                                               \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
           oop(q)->mark()->has_bias_pattern(),                               \
           "these are the only valid states during a mark sweep");           \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
      /* prefetch beyond q */                                                \
      Prefetch::write(q, interval);                                          \
      size_t size = block_size(q);                                           \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
      q += size;                                                             \
      end_of_live = q;                                                       \
    } else {                                                                 \
      /* run over all the contiguous dead objects */                         \
      HeapWord* end = q;                                                     \
      do {                                                                   \
        /* prefetch beyond end */                                            \
        Prefetch::write(end, interval);                                      \
        end += block_size(end);                                              \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
                                                                             \
      /* see if we might want to pretend this object is alive so that        \
       * we don't have to compact quite as often.                            \
       */                                                                    \
      if (allowed_deadspace > 0 && q == compact_top) {                       \
        size_t sz = pointer_delta(end, q);                                   \
        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
          q = end;                                                           \
          end_of_live = end;                                                 \
          continue;                                                          \
        }                                                                    \
      }                                                                      \
                                                                             \
      /* otherwise, it really is a free region. */                           \
                                                                             \
      /* for the previous LiveRange, record the end of the live objects. */  \
      if (liveRange) {                                                       \
        liveRange->set_end(q);                                               \
      }                                                                      \
                                                                             \
      /* record the current LiveRange object.                                \
       * liveRange->start() is overlaid on the mark word.                    \
       */                                                                    \
      liveRange = (LiveRange*)q;                                             \
      liveRange->set_start(end);                                             \
      liveRange->set_end(end);                                               \
                                                                             \
      /* see if this is the first dead region. */                            \
      if (q < first_dead) {                                                  \
        first_dead = q;                                                      \
      }                                                                      \
                                                                             \
      /* move on to the next object */                                       \
      q = end;                                                               \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
  if (liveRange != NULL) {                                                   \
    liveRange->set_end(q);                                                   \
  }                                                                          \
  _end_of_live = end_of_live;                                                \
  if (end_of_live < first_dead) {                                            \
    first_dead = end_of_live;                                                \
  }                                                                          \
  _first_dead = first_dead;                                                  \
                                                                             \
  /* save the compaction_top of the compaction space. */                     \
  cp->space->set_compaction_top(compact_top);                                \
}
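
// Illustrative usage (a sketch, not part of this changeset): a space
// instantiates the macro with its own scan limit and block-layout callbacks.
// For a contiguous space the expansion in space.cpp looks roughly like:
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }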

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                          \
  /* adjust all the interior pointers to point at the new locations of       \
   * objects.  Used by MarkSweep::mark_sweep_phase3(). */                    \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */\
                                                                             \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?");              \
                                                                             \
  if (q < t && _first_dead > q &&                                            \
      !oop(q)->is_gc_marked()) {                                             \
    /* we have a chunk of the space which hasn't moved and we've             \
     * reinitialized the mark word during the previous pass, so we can't     \
     * use is_gc_marked for the traversal. */                                \
    HeapWord* end = _first_dead;                                             \
                                                                             \
    while (q < end) {                                                        \
      /* I originally tried to conjoin "block_start(q) == q" to the          \
       * assertion below, but that doesn't work, because you can't           \
       * accurately traverse previous objects to get to the current one      \
       * after their pointers have been updated, until the actual            \
       * compaction is done.  dld, 4/00 */                                   \
      assert(block_is_obj(q),                                                \
             "should be at block boundaries, and should be looking at objs");\
                                                                             \
      /* point all the oops to the new location */                           \
      size_t size = oop(q)->adjust_pointers();                               \
      size = adjust_obj_size(size);                                          \
                                                                             \
      q += size;                                                             \
    }                                                                        \
                                                                             \
    if (_first_dead == t) {                                                  \
      q = t;                                                                 \
    } else {                                                                 \
      /* $$$ This is funky.  Using this to read the previously written       \
       * LiveRange.  See also use below. */                                  \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();             \
    }                                                                        \
  }                                                                          \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  debug_only(HeapWord* prev_q = NULL);                                       \
  while (q < t) {                                                            \
    /* prefetch beyond q */                                                  \
    Prefetch::write(q, interval);                                            \
    if (oop(q)->is_gc_marked()) {                                            \
      /* q is alive */                                                       \
      /* point all the oops to the new location */                           \
      size_t size = oop(q)->adjust_pointers();                               \
      size = adjust_obj_size(size);                                          \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    } else {                                                                 \
      /* q is not a live object, so its mark should point at the next        \
       * live object */                                                      \
      debug_only(prev_q = q);                                                \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
      assert(q > prev_q, "we should be moving forward through memory");      \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
}
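
// Illustrative usage (a sketch, not part of this changeset): phase 3 expands
// this macro with a size adjuster; for spaces whose objects keep their size
// during adjustment it is an identity function. Roughly, in space.cpp:
//
//   void CompactibleSpace::adjust_pointers() {
//     if (used() == 0) {
//       return;  // Nothing to do; the space is empty.
//     }
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }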

#define SCAN_AND_COMPACT(obj_size) {                                         \
  /* Copy all live objects to their new location.                            \
   * Used by MarkSweep::mark_sweep_phase4(). */                              \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* const t = _end_of_live;                                          \
  debug_only(HeapWord* prev_q = NULL);                                       \
                                                                             \
  if (q < t && _first_dead > q &&                                            \
      !oop(q)->is_gc_marked()) {                                             \
    debug_only(                                                              \
    /* we have a chunk of the space which hasn't moved and we've             \
     * reinitialized the mark word during the previous pass, so we can't     \
     * use is_gc_marked for the traversal. */                                \
    HeapWord* const end = _first_dead;                                       \
                                                                             \
    while (q < end) {                                                        \
      size_t size = obj_size(q);                                             \
      assert(!oop(q)->is_gc_marked(),                                        \
             "should be unmarked (special dense prefix handling)");          \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    }                                                                        \
    ) /* debug_only */                                                       \
                                                                             \
    if (_first_dead == t) {                                                  \
      q = t;                                                                 \
    } else {                                                                 \
      /* $$$ Funky */                                                        \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();            \
    }                                                                        \
  }                                                                          \
                                                                             \
  const intx scan_interval = PrefetchScanIntervalInBytes;                    \
  const intx copy_interval = PrefetchCopyIntervalInBytes;                    \
  while (q < t) {                                                            \
    if (!oop(q)->is_gc_marked()) {                                           \
      /* mark is pointer to next marked oop */                               \
      debug_only(prev_q = q);                                                \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
      assert(q > prev_q, "we should be moving forward through memory");      \
    } else {                                                                 \
      /* prefetch beyond q */                                                \
      Prefetch::read(q, scan_interval);                                      \
                                                                             \
      /* size and destination */                                             \
      size_t size = obj_size(q);                                             \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();             \
                                                                             \
      /* prefetch beyond compaction_top */                                   \
      Prefetch::write(compaction_top, copy_interval);                        \
                                                                             \
      /* copy object and reinit its mark */                                  \
      assert(q != compaction_top, "everything in this pass should be moving");\
      Copy::aligned_conjoint_words(q, compaction_top, size);                 \
      oop(compaction_top)->init_mark();                                      \
      assert(oop(compaction_top)->klass() != NULL, "should have a class");   \
                                                                             \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    }                                                                        \
  }                                                                          \
                                                                             \
  /* Let's remember if we were empty before we did the compaction. */        \
  bool was_empty = used_region().is_empty();                                 \
  /* Reset space after compaction is complete */                             \
  reset_after_compaction();                                                  \
  /* We do this clear, below, since it has overloaded meanings for some */   \
  /* space subtypes.  For example, OffsetTableContigSpaces that were */      \
  /* compacted into will have had their offset table thresholds updated */   \
  /* continuously, but those that weren't need to have their thresholds */   \
  /* re-initialized.  Also mangles unused area for debugging. */             \
  if (used_region().is_empty()) {                                            \
    if (!was_empty) clear(SpaceDecorator::Mangle);                           \
  } else {                                                                   \
    if (ZapUnusedHeapArea) mangle_unused_area();                             \
  }                                                                          \
}
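
// Illustrative usage (a sketch, not part of this changeset): phase 4 performs
// the actual copying; for a contiguous space the expansion is simply, roughly:
//
//   void ContiguousSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }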

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {