comparison src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children 12eea04c8b06
/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_psOldGen.cpp.incl"
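
// PSOldGen manages the old (tenured) generation of the parallel scavenge
// collector: a single contiguous MutableSpace backed by a PSVirtualSpace
// whose committed size can move between min_gen_size() and
// gen_size_limit() as the generation is resized.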

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

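// Deferred variant: records the sizing parameters but performs no
// initialization; presumably for a subclass (such as the adaptive-size
// ASPSOldGen) that sets up its own virtual space and spaces.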
PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);
  // The old gen can grow to gen_size_limit(). _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

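// Wires the generation into the rest of the VM: the object start array,
// the card-table barrier set, the MutableSpace that holds the objects,
// the mark-sweep decorator, and the performance counters.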
void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  CardTableModRefBS* ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
  assert(ct->kind() == BarrierSet::CardTableModRef, "Sanity");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace();

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr, true);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);

  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
                                           virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

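// Serial mark-sweep full-gc hooks: precompact() resets the object start
// array (it is rebuilt as objects move) and computes forwarding
// addresses for this generation and then the young gen;
// adjust_pointers() and compact() run the later phases on this
// generation's space.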
void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Reset start array first.
  debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {)
  start_array()->reset();
  debug_only(})

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

void PSOldGen::move_and_update(ParCompactionManager* cm) {
  PSParallelCompact::move_and_update(cm, PSParallelCompact::old_space_id);
}

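// Space still available for allocation without a full GC: what is free
// in the committed part of the space plus what could still be committed
// from the reserved-but-uncommitted remainder.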
size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation. We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size, is_tlab);

  if (res == NULL) {
    res = expand_and_allocate(word_size, is_tlab);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    heap->size_policy()->tenured_allocation(word_size);
  }

  return res;
}

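// Both expand-and-allocate paths below optionally sleep for
// GCExpandToAllocateDelayMillis between expanding the generation and
// retrying the allocation; the delay appears to be a diagnostic knob
// for widening expansion/allocation race windows in testing.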
HeapWord* PSOldGen::expand_and_allocate(size_t word_size, bool is_tlab) {
  assert(!is_tlab, "TLAB's are not supported in PSOldGen");
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size, is_tlab);
}

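// Lock-free variant used by the parallel GC worker threads: the
// *_noexpand helper claims space with a compare-and-swap on the space's
// top pointer rather than under the Heap_lock.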
HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

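// Expansion strategy: first try the MinHeapDeltaBytes-aligned step if it
// is larger than the request (so small requests still grow the
// generation by a useful amount), fall back to the exact aligned
// request, and finally commit whatever remains of the reserved space.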
void PSOldGen::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  // Report only if the expansion actually succeeded.
  if (success && GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}

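// The actual commit step: grow the virtual space, then let post_resize()
// propagate the new committed region to the object space, start array,
// and card table before the performance counters are refreshed.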
bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result && Verbose && PrintGC) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                           SIZE_FORMAT "K to " SIZE_FORMAT "K",
                           name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Shrink by the aligned-down amount; the virtual space can only be
  // shrunk in alignment-sized units.
  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    if (Verbose && PrintGC) {
      size_t new_mem_size = virtual_space()->committed_size();
      size_t old_mem_size = new_mem_size + size;
      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to " SIZE_FORMAT "K",
                             name(), old_mem_size/K, size/K, new_mem_size/K);
    }
  }
}

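// Adaptive-size-policy entry point: recompute the committed size so that
// roughly desired_free_space bytes are free, clamp it to
// [min_gen_size(), gen_size_limit()], and expand or shrink to match.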
void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
      " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
      desired_free_space, used_in_bytes(), new_size, current_size,
      gen_size_limit(), min_gen_size());
  }

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does. Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, virtual_space()->committed_size());
  }
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

  // Did we expand?
  HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
  if (object_space()->end() < virtual_space_high) {
    // We need to mangle the newly expanded area. The memregion spans
    // end -> new_end, we assume that top -> end is already mangled.
    // This cannot be safely tested for, as allocation may be taking
    // place.
    MemRegion mangle_region(object_space()->end(), virtual_space_high);
    object_space()->mangle_region(mangle_region);
  }

  // ALWAYS do this last!!
  object_space()->set_end(virtual_space_high);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

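// The remaining resizing hooks are only meaningful for the adaptive-size
// subclass (presumably ASPSOldGen, which overrides them); reaching the
// base-class implementations indicates a bug.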
void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }

void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  if (PrintGCDetails && Verbose) {
    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
              capacity_in_bytes(), used_in_bytes());
  } else {
    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
              capacity_in_bytes()/K, used_in_bytes()/K);
  }
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               virtual_space()->low_boundary(),
               virtual_space()->high(),
               virtual_space()->high_boundary());

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  gclog_or_tty->print(" [%s:", name());
  gclog_or_tty->print(" " SIZE_FORMAT "K"
                      "->" SIZE_FORMAT "K"
                      "(" SIZE_FORMAT "K)",
                      prev_used / K, used_in_bytes() / K,
                      capacity_in_bytes() / K);
  gclog_or_tty->print("]");
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif

void PSOldGen::verify(bool allow_dirty) {
  object_space()->verify(allow_dirty);
}
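
// Checks that, for every object in the generation, the object start
// array maps an interior address back to the object's start and records
// the object's block as allocated.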
class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
    _gen(gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj,
              "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj),
              "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}