Mercurial > hg > graal-jvmci-8
annotate src/share/vm/memory/defNewGeneration.hpp @ 1091:6aa7255741f3
6906727: UseCompressedOops: some card-marking fixes related to object arrays
Summary: Introduced a new write_ref_array(HeapWords* start, size_t count) method that does the requisite MemRegion range calculation so (some of the) clients of the erstwhile write_ref_array(MemRegion mr) do not need to worry. This removed all external uses of array_size(), which was also simplified and made private. Asserts were added to catch other possible issues. Further, less essential, fixes stemming from this investigation are deferred to CR 6904516 (to follow shortly in hs17).
Reviewed-by: kvn, coleenp, jmasa
author | ysr |
---|---|
date | Thu, 03 Dec 2009 15:01:57 -0800 |
parents | 850fdf70db2b |
children | c18cbe5936b8 |
rev | line source |
---|---|
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
24 | |
// Forward declarations: these types are referenced below only by pointer,
// so their full definitions are not needed in this header.
class EdenSpace;
class ContiguousSpace;
class ScanClosure;
0 | 28 |
29 // DefNewGeneration is a young generation containing eden, from- and | |
30 // to-space. | |
31 | |
32 class DefNewGeneration: public Generation { | |
33 friend class VMStructs; | |
34 | |
35 protected: | |
36 Generation* _next_gen; | |
37 int _tenuring_threshold; // Tenuring threshold for next collection. | |
38 ageTable _age_table; | |
39 // Size of object to pretenure in words; command line provides bytes | |
40 size_t _pretenure_size_threshold_words; | |
41 | |
42 ageTable* age_table() { return &_age_table; } | |
43 // Initialize state to optimistically assume no promotion failure will | |
44 // happen. | |
45 void init_assuming_no_promotion_failure(); | |
46 // True iff a promotion has failed in the current collection. | |
47 bool _promotion_failed; | |
48 bool promotion_failed() { return _promotion_failed; } | |
49 | |
50 // Handling promotion failure. A young generation collection | |
51 // can fail if a live object cannot be copied out of its | |
52 // location in eden or from-space during the collection. If | |
53 // a collection fails, the young generation is left in a | |
54 // consistent state such that it can be collected by a | |
55 // full collection. | |
56 // Before the collection | |
57 // Objects are in eden or from-space | |
58 // All roots into the young generation point into eden or from-space. | |
59 // | |
60 // After a failed collection | |
61 // Objects may be in eden, from-space, or to-space | |
62 // An object A in eden or from-space may have a copy B | |
63 // in to-space. If B exists, all roots that once pointed | |
64 // to A must now point to B. | |
65 // All objects in the young generation are unmarked. | |
66 // Eden, from-space, and to-space will all be collected by | |
67 // the full collection. | |
68 void handle_promotion_failure(oop); | |
69 | |
70 // In the absence of promotion failure, we wouldn't look at "from-space" | |
71 // objects after a young-gen collection. When promotion fails, however, | |
72 // the subsequent full collection will look at from-space objects: | |
73 // therefore we must remove their forwarding pointers. | |
74 void remove_forwarding_pointers(); | |
75 | |
76 // Preserve the mark of "obj", if necessary, in preparation for its mark | |
77 // word being overwritten with a self-forwarding-pointer. | |
78 void preserve_mark_if_necessary(oop obj, markOop m); | |
79 | |
80 // When one is non-null, so is the other. Together, they each pair is | |
81 // an object with a preserved mark, and its mark value. | |
82 GrowableArray<oop>* _objs_with_preserved_marks; | |
83 GrowableArray<markOop>* _preserved_marks_of_objs; | |
84 | |
85 // Returns true if the collection can be safely attempted. | |
86 // If this method returns false, a collection is not | |
87 // guaranteed to fail but the system may not be able | |
88 // to recover from the failure. | |
89 bool collection_attempt_is_safe(); | |
90 | |
91 // Promotion failure handling | |
92 OopClosure *_promo_failure_scan_stack_closure; | |
93 void set_promo_failure_scan_stack_closure(OopClosure *scan_stack_closure) { | |
94 _promo_failure_scan_stack_closure = scan_stack_closure; | |
95 } | |
96 | |
97 GrowableArray<oop>* _promo_failure_scan_stack; | |
98 GrowableArray<oop>* promo_failure_scan_stack() const { | |
99 return _promo_failure_scan_stack; | |
100 } | |
101 void push_on_promo_failure_scan_stack(oop); | |
102 void drain_promo_failure_scan_stack(void); | |
103 bool _promo_failure_drain_in_progress; | |
104 | |
105 // Performance Counters | |
106 GenerationCounters* _gen_counters; | |
107 CSpaceCounters* _eden_counters; | |
108 CSpaceCounters* _from_counters; | |
109 CSpaceCounters* _to_counters; | |
110 | |
111 // sizing information | |
112 size_t _max_eden_size; | |
113 size_t _max_survivor_size; | |
114 | |
115 // Allocation support | |
116 bool _should_allocate_from_space; | |
117 bool should_allocate_from_space() const { | |
118 return _should_allocate_from_space; | |
119 } | |
120 void clear_should_allocate_from_space() { | |
121 _should_allocate_from_space = false; | |
122 } | |
123 void set_should_allocate_from_space() { | |
124 _should_allocate_from_space = true; | |
125 } | |
126 | |
127 protected: | |
128 // Spaces | |
129 EdenSpace* _eden_space; | |
130 ContiguousSpace* _from_space; | |
131 ContiguousSpace* _to_space; | |
132 | |
133 enum SomeProtectedConstants { | |
134 // Generations are GenGrain-aligned and have size that are multiples of | |
135 // GenGrain. | |
136 MinFreeScratchWords = 100 | |
137 }; | |
138 | |
139 // Return the size of a survivor space if this generation were of size | |
140 // gen_size. | |
141 size_t compute_survivor_size(size_t gen_size, size_t alignment) const { | |
142 size_t n = gen_size / (SurvivorRatio + 2); | |
143 return n > alignment ? align_size_down(n, alignment) : alignment; | |
144 } | |
145 | |
146 public: // was "protected" but caused compile error on win32 | |
147 class IsAliveClosure: public BoolObjectClosure { | |
148 Generation* _g; | |
149 public: | |
150 IsAliveClosure(Generation* g); | |
151 void do_object(oop p); | |
152 bool do_object_b(oop p); | |
153 }; | |
154 | |
155 class KeepAliveClosure: public OopClosure { | |
156 protected: | |
157 ScanWeakRefClosure* _cl; | |
158 CardTableRS* _rs; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
159 template <class T> void do_oop_work(T* p); |
0 | 160 public: |
161 KeepAliveClosure(ScanWeakRefClosure* cl); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
162 virtual void do_oop(oop* p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
163 virtual void do_oop(narrowOop* p); |
0 | 164 }; |
165 | |
166 class FastKeepAliveClosure: public KeepAliveClosure { | |
167 protected: | |
168 HeapWord* _boundary; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
169 template <class T> void do_oop_work(T* p); |
0 | 170 public: |
171 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
172 virtual void do_oop(oop* p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
173 virtual void do_oop(narrowOop* p); |
0 | 174 }; |
175 | |
176 class EvacuateFollowersClosure: public VoidClosure { | |
177 GenCollectedHeap* _gch; | |
178 int _level; | |
179 ScanClosure* _scan_cur_or_nonheap; | |
180 ScanClosure* _scan_older; | |
181 public: | |
182 EvacuateFollowersClosure(GenCollectedHeap* gch, int level, | |
183 ScanClosure* cur, ScanClosure* older); | |
184 void do_void(); | |
185 }; | |
186 | |
187 class FastEvacuateFollowersClosure; | |
188 friend class FastEvacuateFollowersClosure; | |
189 class FastEvacuateFollowersClosure: public VoidClosure { | |
190 GenCollectedHeap* _gch; | |
191 int _level; | |
192 DefNewGeneration* _gen; | |
193 FastScanClosure* _scan_cur_or_nonheap; | |
194 FastScanClosure* _scan_older; | |
195 public: | |
196 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, | |
197 DefNewGeneration* gen, | |
198 FastScanClosure* cur, | |
199 FastScanClosure* older); | |
200 void do_void(); | |
201 }; | |
202 | |
203 public: | |
204 DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level, | |
205 const char* policy="Copy"); | |
206 | |
207 virtual Generation::Name kind() { return Generation::DefNew; } | |
208 | |
209 // Accessing spaces | |
210 EdenSpace* eden() const { return _eden_space; } | |
211 ContiguousSpace* from() const { return _from_space; } | |
212 ContiguousSpace* to() const { return _to_space; } | |
213 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
214 virtual CompactibleSpace* first_compaction_space() const; |
0 | 215 |
216 // Space enquiries | |
217 size_t capacity() const; | |
218 size_t used() const; | |
219 size_t free() const; | |
220 size_t max_capacity() const; | |
221 size_t capacity_before_gc() const; | |
222 size_t unsafe_max_alloc_nogc() const; | |
223 size_t contiguous_available() const; | |
224 | |
225 size_t max_eden_size() const { return _max_eden_size; } | |
226 size_t max_survivor_size() const { return _max_survivor_size; } | |
227 | |
228 bool supports_inline_contig_alloc() const { return true; } | |
229 HeapWord** top_addr() const; | |
230 HeapWord** end_addr() const; | |
231 | |
232 // Thread-local allocation buffers | |
233 bool supports_tlab_allocation() const { return true; } | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
234 size_t tlab_capacity() const; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
235 size_t unsafe_max_tlab_alloc() const; |
0 | 236 |
237 // Grow the generation by the specified number of bytes. | |
238 // The size of bytes is assumed to be properly aligned. | |
239 // Return true if the expansion was successful. | |
240 bool expand(size_t bytes); | |
241 | |
242 // DefNewGeneration cannot currently expand except at | |
243 // a GC. | |
244 virtual bool is_maximal_no_gc() const { return true; } | |
245 | |
246 // Iteration | |
247 void object_iterate(ObjectClosure* blk); | |
248 void object_iterate_since_last_GC(ObjectClosure* cl); | |
249 | |
250 void younger_refs_iterate(OopsInGenClosure* cl); | |
251 | |
252 void space_iterate(SpaceClosure* blk, bool usedOnly = false); | |
253 | |
254 // Allocation support | |
255 virtual bool should_allocate(size_t word_size, bool is_tlab) { | |
256 assert(UseTLAB || !is_tlab, "Should not allocate tlab"); | |
257 | |
258 size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize); | |
259 | |
260 const bool non_zero = word_size > 0; | |
261 const bool overflows = word_size >= overflow_limit; | |
262 const bool check_too_big = _pretenure_size_threshold_words > 0; | |
263 const bool not_too_big = word_size < _pretenure_size_threshold_words; | |
264 const bool size_ok = is_tlab || !check_too_big || not_too_big; | |
265 | |
266 bool result = !overflows && | |
267 non_zero && | |
268 size_ok; | |
269 | |
270 return result; | |
271 } | |
272 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
273 HeapWord* allocate(size_t word_size, bool is_tlab); |
0 | 274 HeapWord* allocate_from_space(size_t word_size); |
275 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
276 HeapWord* par_allocate(size_t word_size, bool is_tlab); |
0 | 277 |
278 // Prologue & Epilogue | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
279 virtual void gc_prologue(bool full); |
0 | 280 virtual void gc_epilogue(bool full); |
281 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
282 // Save the tops for eden, from, and to |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
283 virtual void record_spaces_top(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
284 |
0 | 285 // Doesn't require additional work during GC prologue and epilogue |
286 virtual bool performs_in_place_marking() const { return false; } | |
287 | |
288 // Accessing marks | |
289 void save_marks(); | |
290 void reset_saved_marks(); | |
291 bool no_allocs_since_save_marks(); | |
292 | |
293 // Need to declare the full complement of closures, whether we'll | |
294 // override them or not, or get message from the compiler: | |
295 // oop_since_save_marks_iterate_nv hides virtual function... | |
296 #define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ | |
297 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); | |
298 | |
299 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DECL) | |
300 | |
301 #undef DefNew_SINCE_SAVE_MARKS_DECL | |
302 | |
303 // For non-youngest collection, the DefNewGeneration can contribute | |
304 // "to-space". | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
305 virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor, |
0 | 306 size_t max_alloc_words); |
307 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
308 // Reset for contribution of "to-space". |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
309 virtual void reset_scratch(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
310 |
0 | 311 // GC support |
312 virtual void compute_new_size(); | |
313 virtual void collect(bool full, | |
314 bool clear_all_soft_refs, | |
315 size_t size, | |
316 bool is_tlab); | |
317 HeapWord* expand_and_allocate(size_t size, | |
318 bool is_tlab, | |
319 bool parallel = false); | |
320 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
321 oop copy_to_survivor_space(oop old); |
0 | 322 int tenuring_threshold() { return _tenuring_threshold; } |
323 | |
324 // Performance Counter support | |
325 void update_counters(); | |
326 | |
327 // Printing | |
328 virtual const char* name() const; | |
329 virtual const char* short_name() const { return "DefNew"; } | |
330 | |
331 bool must_be_youngest() const { return true; } | |
332 bool must_be_oldest() const { return false; } | |
333 | |
334 // PrintHeapAtGC support. | |
335 void print_on(outputStream* st) const; | |
336 | |
337 void verify(bool allow_dirty); | |
338 | |
339 protected: | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
340 // If clear_space is true, clear the survivor spaces. Eden is |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
341 // cleared if the minimum size of eden is 0. If mangle_space |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
342 // is true, also mangle the space in debug mode. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
343 void compute_space_boundaries(uintx minimum_eden_size, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
344 bool clear_space, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
345 bool mangle_space); |
0 | 346 // Scavenge support |
347 void swap_spaces(); | |
348 }; |