comparison src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp @ 1387:0bfd3fb24150

6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
Summary: Ensure a full GC that clears SoftReferences before throwing an out-of-memory
Reviewed-by: ysr, jcoomes
author jmasa
date Tue, 13 Apr 2010 13:52:10 -0700
parents 7b0e9cba0307
children c18cbe5936b8
comparison 1361:6b73e879f1c2 vs 1387:0bfd3fb24150
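
The description above states what the change is for; the hunks below show how it is done. As a rough orientation, here is a minimal standalone C++ sketch of the new decision rule (all type and function names are illustrative stand-ins, not HotSpot APIs; only the limit_exceeded/softrefs_clear predicate mirrors the code added to mem_allocate below):

// Standalone sketch (not HotSpot code): only report out-of-memory for the
// GC overhead limit after a collection that has cleared all SoftReferences.
#include <cassert>
#include <cstdio>

struct SizePolicySketch {        // stand-in for the adaptive size policy state
  bool gc_overhead_limit_exceeded;
};

struct CollectorPolicySketch {   // stand-in for the collector policy
  bool all_soft_refs_clear;      // true once a full GC ran with soft refs cleared
};

// Returns true when a failed allocation should surface as an OutOfMemoryError.
bool should_throw_oom(const SizePolicySketch& size_policy,
                      const CollectorPolicySketch& collector_policy) {
  const bool limit_exceeded = size_policy.gc_overhead_limit_exceeded;
  const bool softrefs_clear = collector_policy.all_soft_refs_clear;
  // Invariant asserted by the new code: the limit is only reported as
  // exceeded after SoftReferences have been cleared.
  assert(!limit_exceeded || softrefs_clear);
  return limit_exceeded && softrefs_clear;
}

int main() {
  SizePolicySketch sp = { true };
  CollectorPolicySketch cp = { true };
  std::printf("throw OOM: %s\n", should_throw_oom(sp, cp) ? "yes" : "no");
  return 0;
}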
@@ old 1-7 / new 1-7 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ old 52-70 / new 52-71 @@
 
 jint ParallelScavengeHeap::initialize() {
   CollectedHeap::pre_initialize();
 
   // Cannot be initialized until after the flags are parsed
-  GenerationSizer flag_parser;
+  // GenerationSizer flag_parser;
+  _collector_policy = new GenerationSizer();
 
-  size_t yg_min_size = flag_parser.min_young_gen_size();
-  size_t yg_max_size = flag_parser.max_young_gen_size();
-  size_t og_min_size = flag_parser.min_old_gen_size();
-  size_t og_max_size = flag_parser.max_old_gen_size();
+  size_t yg_min_size = _collector_policy->min_young_gen_size();
+  size_t yg_max_size = _collector_policy->max_young_gen_size();
+  size_t og_min_size = _collector_policy->min_old_gen_size();
+  size_t og_max_size = _collector_policy->max_old_gen_size();
   // Why isn't there a min_perm_gen_size()?
-  size_t pg_min_size = flag_parser.perm_gen_size();
-  size_t pg_max_size = flag_parser.max_perm_gen_size();
+  size_t pg_min_size = _collector_policy->perm_gen_size();
+  size_t pg_max_size = _collector_policy->max_perm_gen_size();
 
   trace_gen_sizes("ps heap raw",
                   pg_min_size, pg_max_size,
                   og_min_size, og_max_size,
                   yg_min_size, yg_max_size);
@@ old 87-102 / new 88-105 @@
   // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
   // should check UseAdaptiveSizePolicy. Changes from generationSizer could
   // move to the common code.
   yg_min_size = align_size_up(yg_min_size, yg_align);
   yg_max_size = align_size_up(yg_max_size, yg_align);
-  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
+  size_t yg_cur_size =
+    align_size_up(_collector_policy->young_gen_size(), yg_align);
   yg_cur_size = MAX2(yg_cur_size, yg_min_size);
 
   og_min_size = align_size_up(og_min_size, og_align);
   og_max_size = align_size_up(og_max_size, og_align);
-  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
+  size_t og_cur_size =
+    align_size_up(_collector_policy->old_gen_size(), og_align);
   og_cur_size = MAX2(og_cur_size, og_min_size);
 
   pg_min_size = align_size_up(pg_min_size, pg_align);
   pg_max_size = align_size_up(pg_max_size, pg_align);
   size_t pg_cur_size = pg_min_size;
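
The initialize() hunks above replace the stack-local GenerationSizer (flag_parser) with a heap-owned _collector_policy, so the sizing policy survives initialization and can later answer queries such as all_soft_refs_clear(). A hedged sketch of that ownership change, with stand-in class names (only the _collector_policy field and the accessor names echo the hunk):

// Sketch (illustrative types only): why the sizing policy becomes a heap
// field instead of a local in initialize().
#include <cstddef>
#include <cstdio>

class GenerationSizerSketch {      // stand-in for HotSpot's GenerationSizer
 public:
  size_t min_young_gen_size() const { return 4u * 1024 * 1024; }
  size_t max_young_gen_size() const { return 64u * 1024 * 1024; }
  bool all_soft_refs_clear() const { return _all_soft_refs_clear; }
  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
 private:
  bool _all_soft_refs_clear = false;
};

class HeapSketch {
 public:
  void initialize() {
    // Before the change: GenerationSizer flag_parser;  (died with initialize())
    // After: keep the policy so later allocation paths can consult it.
    _collector_policy = new GenerationSizerSketch();
    _yg_min = _collector_policy->min_young_gen_size();
    _yg_max = _collector_policy->max_young_gen_size();
  }
  GenerationSizerSketch* collector_policy() const { return _collector_policy; }
 private:
  GenerationSizerSketch* _collector_policy = nullptr;
  size_t _yg_min = 0;
  size_t _yg_max = 0;
};

int main() {
  HeapSketch heap;
  heap.initialize();
  // The policy is still reachable after initialization, e.g. for the
  // all_soft_refs_clear() query used in the allocation slow path.
  std::printf("soft refs clear: %d\n",
              heap.collector_policy()->all_soft_refs_clear());
  return 0;
}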
@@ old 352-361 / new 355-369 @@
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded) {
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+
+  // In general gc_overhead_limit_was_exceeded should be false so
+  // set it so here and reset it to true only if the gc time
+  // limit is being exceeded as checked below.
+  *gc_overhead_limit_was_exceeded = false;
 
   HeapWord* result = young_gen()->allocate(size, is_tlab);
 
   uint loop_count = 0;
   uint gc_count = 0;
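
The lines added at the top of mem_allocate() default the gc_overhead_limit_was_exceeded out-parameter to false before the allocation loop runs, so the flag has a well-defined value on every return path. A tiny sketch of that contract, using stand-in types and names:

// Sketch (names are stand-ins): initialize the out-parameter once, up front,
// and flip it only on the one path that means "overhead limit hit".
#include <cstddef>

typedef char* HeapWordSketch;

HeapWordSketch mem_allocate_sketch(size_t size, bool is_tlab,
                                   bool* gc_overhead_limit_was_exceeded) {
  // In general the flag should be false; only the overhead-limit path
  // (elided below) would set it to true before returning NULL.
  *gc_overhead_limit_was_exceeded = false;
  (void)size; (void)is_tlab;
  HeapWordSketch result = nullptr;   // stand-in for young_gen()->allocate(...)
  if (result != nullptr) {
    return result;                   // fast path: flag stays false
  }
  // ... slow path elided ...
  return nullptr;
}

int main() {
  bool exceeded = true;              // deliberately stale before the call
  (void)mem_allocate_sketch(64, false, &exceeded);
  return exceeded ? 1 : 0;           // 0: the callee reset the flag
}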
@@ old 426-453 / new 434-443 @@
     }
   }
 
   if (result == NULL) {
 
-    // Exit the loop if if the gc time limit has been exceeded.
-    // The allocation must have failed above (result must be NULL),
-    // and the most recent collection must have exceeded the
-    // gc time limit.  Exit the loop so that an out-of-memory
-    // will be thrown (returning a NULL will do that), but
-    // clear gc_time_limit_exceeded so that the next collection
-    // will succeeded if the applications decides to handle the
-    // out-of-memory and tries to go on.
-    *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
-    if (size_policy()->gc_time_limit_exceeded()) {
-      size_policy()->set_gc_time_limit_exceeded(false);
-      if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
-          "return NULL because gc_time_limit_exceeded is set");
-      }
-      return NULL;
-    }
-
     // Generate a VM operation
     VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
     VMThread::execute(&op);
 
     // Did the VM operation execute? If so, return the result directly.
@@ old 461-480 / new 451-488 @@
       // and/or stall as necessary.
       if (op.gc_locked()) {
         assert(op.result() == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
       }
-      // If a NULL result is being returned, an out-of-memory
-      // will be thrown now.  Clear the gc_time_limit_exceeded
-      // flag to avoid the following situation.
-      //  gc_time_limit_exceeded is set during a collection
-      //  the collection fails to return enough space and an OOM is thrown
-      //  the next GC is skipped because the gc_time_limit_exceeded
-      //  flag is set and another OOM is thrown
-      if (op.result() == NULL) {
-        size_policy()->set_gc_time_limit_exceeded(false);
+
+      // Exit the loop if the gc time limit has been exceeded.
+      // The allocation must have failed above ("result" guarding
+      // this path is NULL) and the most recent collection has exceeded the
+      // gc overhead limit (although enough may have been collected to
+      // satisfy the allocation).  Exit the loop so that an out-of-memory
+      // will be thrown (return a NULL ignoring the contents of
+      // op.result()),
+      // but clear gc_overhead_limit_exceeded so that the next collection
+      // starts with a clean slate (i.e., forgets about previous overhead
+      // excesses).  Fill op.result() with a filler object so that the
+      // heap remains parsable.
+      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
+      const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
+      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
+      if (limit_exceeded && softrefs_clear) {
+        *gc_overhead_limit_was_exceeded = true;
+        size_policy()->set_gc_overhead_limit_exceeded(false);
+        if (PrintGCDetails && Verbose) {
+          gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
+            "return NULL because gc_overhead_limit_exceeded is set");
+        }
+        if (op.result() != NULL) {
+          CollectedHeap::fill_with_object(op.result(), size);
+        }
+        return NULL;
       }
+
       return op.result();
     }
   }
 
   // The policy object will prevent us from looping forever.  If the
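
The hunk above is the heart of the fix in mem_allocate(): an out-of-memory is surfaced only when the overhead limit was exceeded and SoftReferences have already been cleared, and any space the VM operation did hand back is filled with a dummy object so the heap stays parsable. A standalone sketch of that failure path (policy and filler types are stand-ins; the control flow follows the new hunk):

// Sketch of the new failure path: return nullptr (which the caller turns
// into an OOM) only when the overhead limit is exceeded and soft refs were
// cleared, and plug any returned-but-unused space so heap walks still work.
#include <cassert>
#include <cstddef>
#include <cstring>

struct PolicySketch {
  bool gc_overhead_limit_exceeded;
  bool all_soft_refs_clear;
};

typedef char* HeapWordSketch;

void fill_with_dummy_object(HeapWordSketch start, size_t byte_size) {
  // Stand-in for CollectedHeap::fill_with_object(): make the range parsable.
  std::memset(start, 0, byte_size);
}

HeapWordSketch after_failed_gc(HeapWordSketch op_result, size_t size,
                               PolicySketch* policy,
                               bool* gc_overhead_limit_was_exceeded) {
  const bool limit_exceeded = policy->gc_overhead_limit_exceeded;
  const bool softrefs_clear = policy->all_soft_refs_clear;
  assert(!limit_exceeded || softrefs_clear);     // invariant asserted in the hunk
  if (limit_exceeded && softrefs_clear) {
    *gc_overhead_limit_was_exceeded = true;
    policy->gc_overhead_limit_exceeded = false;  // clean slate for the next GC
    if (op_result != nullptr) {
      fill_with_dummy_object(op_result, size);   // keep the heap parsable
    }
    return nullptr;                              // becomes an OutOfMemoryError
  }
  return op_result;
}

int main() {
  char space[64];
  PolicySketch p = { true, true };
  bool exceeded = false;
  HeapWordSketch r = after_failed_gc(space, sizeof(space), &p, &exceeded);
  return (r == nullptr && exceeded) ? 0 : 1;
}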
@@ old 611-628 / new 619-637 @@
       // Exit the loop if the gc time limit has been exceeded.
       // The allocation must have failed above (result must be NULL),
       // and the most recent collection must have exceeded the
       // gc time limit.  Exit the loop so that an out-of-memory
       // will be thrown (returning a NULL will do that), but
-      // clear gc_time_limit_exceeded so that the next collection
+      // clear gc_overhead_limit_exceeded so that the next collection
       // will succeeded if the applications decides to handle the
       // out-of-memory and tries to go on.
-      if (size_policy()->gc_time_limit_exceeded()) {
-        size_policy()->set_gc_time_limit_exceeded(false);
+      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
+      if (limit_exceeded) {
+        size_policy()->set_gc_overhead_limit_exceeded(false);
         if (PrintGCDetails && Verbose) {
-          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
-            "return NULL because gc_time_limit_exceeded is set");
+          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
+            " return NULL because gc_overhead_limit_exceeded is set");
         }
         assert(result == NULL, "Allocation did not fail");
         return NULL;
       }
 
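
The permanent_mem_allocate() hunk above applies the same rename; as shown in this diff, its exit test checks only the overhead-limit flag rather than the combined limit-and-softrefs predicate used in mem_allocate(). A small sketch of that simpler check, with stand-in types and a plain bool for the verbose-logging flags:

// Sketch of the simpler perm-gen exit test shown in this hunk: the loop
// gives up and reports OOM as soon as the overhead-limit flag is set.
#include <cstdio>

struct SizePolicySketch {
  bool gc_overhead_limit_exceeded;
  void clear() { gc_overhead_limit_exceeded = false; }
};

// Returns true when the allocation loop should give up and report OOM.
bool perm_alloc_should_give_up(SizePolicySketch* size_policy, bool verbose) {
  const bool limit_exceeded = size_policy->gc_overhead_limit_exceeded;
  if (limit_exceeded) {
    size_policy->clear();   // next collection starts without the flag set
    if (verbose) {
      std::puts("permanent_mem_allocate: return NULL because "
                "gc_overhead_limit_exceeded is set");
    }
    return true;
  }
  return false;
}

int main() {
  SizePolicySketch sp = { true };
  return perm_alloc_should_give_up(&sp, /*verbose=*/true) ? 0 : 1;
}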
@@ old 641-658 / new 650-668 @@
       if (op.gc_locked()) {
         assert(op.result() == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
       }
       // If a NULL results is being returned, an out-of-memory
-      // will be thrown now.  Clear the gc_time_limit_exceeded
+      // will be thrown now.  Clear the gc_overhead_limit_exceeded
       // flag to avoid the following situation.
-      //  gc_time_limit_exceeded is set during a collection
+      //  gc_overhead_limit_exceeded is set during a collection
       //  the collection fails to return enough space and an OOM is thrown
-      //  the next GC is skipped because the gc_time_limit_exceeded
-      //  flag is set and another OOM is thrown
+      //  a subsequent GC prematurely throws an out-of-memory because
+      //  the gc_overhead_limit_exceeded counts did not start
+      //  again from 0.
       if (op.result() == NULL) {
-        size_policy()->set_gc_time_limit_exceeded(false);
+        size_policy()->reset_gc_overhead_limit_count();
       }
       return op.result();
     }
   }
 
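
The final hunk swaps clearing a boolean for reset_gc_overhead_limit_count(), and its rewritten comment explains why: the limit is tracked as a running count, and after an out-of-memory the count should restart from zero so a later GC is not charged with earlier excesses. A hedged sketch of such a consecutive-count mechanism (the 98% threshold and the count of 5 are illustrative assumptions; only the reset-on-OOM idea comes from the hunk):

// Sketch of a consecutive-count overhead limit with an explicit reset.
// Counter layout and threshold values are illustrative assumptions.
#include <cstdio>

class OverheadLimitSketch {
 public:
  // Called after each collection with the fraction of time spent in GC.
  void record_collection(double gc_time_fraction) {
    if (gc_time_fraction > 0.98) {
      ++_count;                 // another collection over the limit
    } else {
      _count = 0;               // a healthy collection breaks the streak
    }
  }
  bool limit_exceeded() const { return _count >= 5; }
  // Mirrors the intent of reset_gc_overhead_limit_count(): after throwing
  // an OOM, forget earlier excesses so the next GC starts with a clean slate.
  void reset_count() { _count = 0; }
 private:
  int _count = 0;
};

int main() {
  OverheadLimitSketch limit;
  for (int i = 0; i < 5; ++i) limit.record_collection(0.99);
  std::printf("exceeded before reset: %d\n", limit.limit_exceeded());
  limit.reset_count();
  std::printf("exceeded after reset:  %d\n", limit.limit_exceeded());
  return 0;
}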