comparison: src/share/vm/memory/defNewGeneration.cpp @ 269:850fdf70db2b (merge changeset)
author:   jmasa
date:     Mon, 28 Jul 2008 15:30:23 -0700
parents:  d1605aabd0a1 12eea04c8b06
children: 1ee8caae33af
--- src/share/vm/memory/defNewGeneration.cpp  238:3df2fe7c4451
+++ src/share/vm/memory/defNewGeneration.cpp  269:850fdf70db2b
@@ -170,19 +170,29 @@
   _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                       _gen_counters);
   _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                     _gen_counters);
 
-  compute_space_boundaries(0);
+  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   update_counters();
   _next_gen = NULL;
   _tenuring_threshold = MaxTenuringThreshold;
   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
 }
 
-void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
-  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
+void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
+                                                bool clear_space,
+                                                bool mangle_space) {
+  uintx alignment =
+    GenCollectedHeap::heap()->collector_policy()->min_alignment();
+
+  // If the spaces are being cleared (currently done only at heap
+  // initialization), the survivor spaces need not be empty.
+  // Otherwise, no care is taken for used areas in the survivor spaces,
+  // so check that they are empty.
+  assert(clear_space || (to()->is_empty() && from()->is_empty()),
+         "Initialization of the survivor spaces assumes these are empty");
 
   // Compute sizes
   uintx size = _virtual_space.committed_size();
   uintx survivor_size = compute_survivor_size(size, alignment);
   uintx eden_size = size - (2*survivor_size);
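
For context: the SpaceDecorator::Clear and SpaceDecorator::Mangle arguments threaded through this change are named boolean constants rather than an enum. A minimal sketch of the decorator as implied by its uses in this diff (the real spaceDecorator.hpp may carry additional members):

    // Minimal sketch, inferred from the call sites in this diff.
    // Named constants keep call sites readable, e.g.
    //   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle)
    // instead of an opaque (0, true, true).
    class SpaceDecorator: public AllStatic {
     public:
      static const bool Clear      = true;
      static const bool DontClear  = false;
      static const bool Mangle     = true;
      static const bool DontMangle = false;
    };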
@@ -212,20 +222,45 @@
 
   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
   MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
 
-  eden()->initialize(edenMR, (minimum_eden_size == 0));
-  // If minumum_eden_size != 0, we will not have cleared any
+  // A minimum eden size implies that there is a part of eden that
+  // is being used and that affects the initialization of any
+  // newly formed eden.
+  bool live_in_eden = minimum_eden_size > 0;
+
+  // If not clearing the spaces, do some checking to verify that
+  // the spaces are already mangled.
+  if (!clear_space) {
+    // Must check mangling before the spaces are reshaped. Otherwise,
+    // the bottom or end of one space may have moved into another, and
+    // a failure of the check may not correctly indicate which space
+    // is not properly mangled.
+    if (ZapUnusedHeapArea) {
+      HeapWord* limit = (HeapWord*) _virtual_space.high();
+      eden()->check_mangled_unused_area(limit);
+      from()->check_mangled_unused_area(limit);
+      to()->check_mangled_unused_area(limit);
+    }
+  }
+
+  // Reset the spaces for their new regions.
+  eden()->initialize(edenMR,
+                     clear_space && !live_in_eden,
+                     SpaceDecorator::Mangle);
+  // If clear_space and live_in_eden, we will not have cleared any
   // portion of eden above its top. This can cause newly
   // expanded space not to be mangled if using ZapUnusedHeapArea.
   // We explicitly do such mangling here.
-  if (ZapUnusedHeapArea && (minimum_eden_size != 0)) {
+  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
     eden()->mangle_unused_area();
   }
-  from()->initialize(fromMR, true);
-  to()->initialize(toMR , true);
+  from()->initialize(fromMR, clear_space, mangle_space);
+  to()->initialize(toMR, clear_space, mangle_space);
+
+  // Set next compaction spaces.
   eden()->set_next_compaction_space(from());
   // The to-space is normally empty before a compaction, so it need
   // not be considered. The exception is during promotion
   // failure handling when to-space can contain live objects.
   from()->set_next_compaction_space(NULL);
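
The three-argument initialize() used above subsumes the old clear-only boolean. A hedged sketch of what the flagged form is expected to do (set_bottom, set_end, and clear(bool) are existing Space operations; the exact body lives in space.cpp):

    // Sketch, not the actual implementation.
    void Space::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
      set_bottom(mr.start());   // adopt the new region boundaries
      set_end(mr.end());
      if (clear_space) {
        clear(mangle_space);    // top = bottom; optionally mangle [top, end)
      }
      // With !clear_space the current contents survive the reshaping,
      // which is why the caller verified the mangling beforehand.
    }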
@@ -248,11 +283,20 @@
 }
 
 bool DefNewGeneration::expand(size_t bytes) {
   MutexLocker x(ExpandHeap_lock);
+  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
   bool success = _virtual_space.expand_by(bytes);
+  if (success && ZapUnusedHeapArea) {
+    // Mangle newly committed space immediately because it
+    // can be done here more simply than after the new
+    // spaces have been computed.
+    HeapWord* new_high = (HeapWord*) _virtual_space.high();
+    MemRegion mangle_region(prev_high, new_high);
+    SpaceMangler::mangle_region(mangle_region);
+  }
 
   // Do not attempt an expand to the reserve size. The
   // request should properly observe the maximum size of
   // the generation so an expand-to-reserve should be
   // unnecessary. Also a second call to expand-to-reserve
@@ -260,11 +304,12 @@
   // For example if the first expand fails for unknown reasons,
   // but the second succeeds and expands the heap to its maximum
   // value.
   if (GC_locker::is_active()) {
     if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
+      gclog_or_tty->print_cr("Garbage collection disabled, "
+                             "expanded heap instead");
     }
   }
 
   return success;
 }
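
SpaceMangler::mangle_region() zaps a raw address range. A minimal sketch, assuming the conventional HotSpot badHeapWord fill pattern (Copy::fill_to_words is the existing utility; the actual body is in spaceDecorator.cpp):

    // Sketch: stamp a recognizable bad pattern over unused words so any
    // stray use of the region trips asserts or fails recognizably.
    void SpaceMangler::mangle_region(MemRegion mr) {
      assert(ZapUnusedHeapArea, "mangling is only done when zapping is enabled");
      Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
    }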
@@ -324,20 +369,28 @@
     assert(change % alignment == 0, "just checking");
     _virtual_space.shrink_by(change);
     changed = true;
   }
   if (changed) {
-    compute_space_boundaries(eden()->used());
-    MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
+    // The spaces have already been mangled at this point but
+    // may not have been cleared (set top = bottom) and should be.
+    // Mangling was done when the heap was being expanded.
+    compute_space_boundaries(eden()->used(),
+                             SpaceDecorator::Clear,
+                             SpaceDecorator::DontMangle);
+    MemRegion cmr((HeapWord*)_virtual_space.low(),
+                  (HeapWord*)_virtual_space.high());
     Universe::heap()->barrier_set()->resize_covered_region(cmr);
     if (Verbose && PrintGC) {
       size_t new_size_after = _virtual_space.committed_size();
       size_t eden_size_after = eden()->capacity();
       size_t survivor_size_after = from()->capacity();
-      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
+      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
+                          SIZE_FORMAT "K [eden="
                           SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
-                          new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
+                          new_size_before/K, new_size_after/K,
+                          eden_size_after/K, survivor_size_after/K);
       if (WizardMode) {
         gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
                             thread_increase_size/K, threads_count);
       }
       gclog_or_tty->cr();
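
The flag pairing deliberately differs between the two compute_space_boundaries() call sites in this change; shown together for contrast (both calls appear in this diff):

    // At heap initialization: nothing has been mangled yet.
    compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);

    // On resize: expand() already mangled the newly committed memory,
    // so the spaces only need to be cleared, not re-mangled.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);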
@@ -478,11 +531,11 @@
   // These can be shared for all code paths
   IsAliveClosure is_alive(this);
   ScanWeakRefClosure scan_weak_ref(this);
 
   age_table()->clear();
-  to()->clear();
+  to()->clear(SpaceDecorator::Mangle);
 
   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 
   assert(gch->no_allocs_since_save_marks(0),
          "save marks have not been newly set.");
@@ -523,12 +576,22 @@
   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
   ref_processor()->process_discovered_references(
     soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
   if (!promotion_failed()) {
     // Swap the survivor spaces.
-    eden()->clear();
-    from()->clear();
+    eden()->clear(SpaceDecorator::Mangle);
+    from()->clear(SpaceDecorator::Mangle);
+    if (ZapUnusedHeapArea) {
+      // This is now done here because of the piece-meal mangling which
+      // can check for valid mangling at intermediate points in the
+      // collection(s). When a minor collection fails to collect
+      // sufficient space, resizing of the young generation can occur
+      // and redistribute the spaces in the young generation. Mangle
+      // here so that unzapped regions don't get distributed to
+      // other spaces.
+      to()->mangle_unused_area();
+    }
     swap_spaces();
 
     assert(to()->is_empty(), "to space should be empty now");
 
     // Set the desired survivor size to half the real survivor space
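
The "piece-meal" scheme referred to above works because each space remembers a high-water mark: everything above the mark was zapped earlier, so only [top, mark) can hold unmangled stale words. The sketch below paraphrases that idea; the _top_for_allocations bookkeeping and the MIN2 clamp are assumptions about SpaceMangler, not quoted code:

    // Sketch: re-mangle only the part of the unused area that may have
    // gone stale since the high-water mark was recorded.
    void SpaceMangler::mangle_unused_area() {
      assert(ZapUnusedHeapArea, "mangling is only done when zapping is enabled");
      HeapWord* mangled_end = MIN2(top_for_allocations(), end());
      if (top() < mangled_end) {
        SpaceMangler::mangle_region(MemRegion(top(), mangled_end));
        set_top_for_allocations(top());  // nothing below top is stale now
      }
    }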
@@ -751,10 +814,19 @@
     sb->next = list;
     list = sb;
   }
 }
 
+void DefNewGeneration::reset_scratch() {
+  // If contributing scratch in to_space, mangle all of
+  // to_space if ZapUnusedHeapArea. This is needed because
+  // top is not maintained while using to-space as scratch.
+  if (ZapUnusedHeapArea) {
+    to()->mangle_unused_area_complete();
+  }
+}
+
 bool DefNewGeneration::collection_attempt_is_safe() {
   if (!to()->is_empty()) {
     return false;
   }
   if (_next_gen == NULL) {
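
reset_scratch() uses the _complete variant because top is not maintained while to-space is on loan as scratch, so the incremental high-water mark cannot be trusted. A sketch of the distinction, under the same assumptions as the sketch above:

    // Sketch: the "complete" form ignores the remembered mark and
    // re-mangles the entire unused range.
    void SpaceMangler::mangle_unused_area_complete() {
      assert(ZapUnusedHeapArea, "mangling is only done when zapping is enabled");
      SpaceMangler::mangle_region(MemRegion(top(), end()));
    }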
@@ -804,14 +876,28 @@
     if (full) { // we seem to be running out of space
       set_should_allocate_from_space();
     }
   }
 
+  if (ZapUnusedHeapArea) {
+    eden()->check_mangled_unused_area_complete();
+    from()->check_mangled_unused_area_complete();
+    to()->check_mangled_unused_area_complete();
+  }
+
   // update the generation and space performance counters
   update_counters();
   gch->collector_policy()->counters()->update_counters();
 }
 
+void DefNewGeneration::record_spaces_top() {
+  assert(ZapUnusedHeapArea, "Not mangling unused space");
+  eden()->set_top_for_allocations();
+  to()->set_top_for_allocations();
+  from()->set_top_for_allocations();
+}
+
 
 void DefNewGeneration::update_counters() {
   if (UsePerfData) {
     _eden_counters->update_all();
     _from_counters->update_all();
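
record_spaces_top() seeds the marks that the check_mangled_unused_area*() calls in gc_epilogue verify against. A hedged sketch of both ends of that handshake; is_mangled() is an assumed helper that tests one word for the zap pattern:

    // Sketch: remember where top was when mangling was last known good...
    void SpaceMangler::set_top_for_allocations() {
      _top_for_allocations = top();
    }

    // ...and later verify that everything between the current top and that
    // mark (capped by the caller's limit) still carries the mangle pattern.
    void SpaceMangler::check_mangled_unused_area(HeapWord* limit) {
      assert(ZapUnusedHeapArea, "checking is only done when zapping is enabled");
      HeapWord* check_end = MIN2(top_for_allocations(), limit);
      for (HeapWord* p = top(); p < check_end; p++) {
        assert(is_mangled(p), "heap word unexpectedly unmangled");  // assumed helper
      }
    }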