comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 1088:3fc996d4edd2

6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
Summary: ScavengeALot now causes an incremental (but possibly partially young, in the G1 sense) collection. Some such collections may be abandoned on account of MMU specs. Band-aided a native leak associated with abandoned pauses, as well as an MMU tracker overflow related to frequent scavenge events in the face of a large MMU denominator interval; the latter is protected by a product flag that defaults to false.
Reviewed-by: tonyp
author ysr
date Thu, 19 Nov 2009 13:43:25 -0800
parents fa2f65ebeb08
children db0d5eba9d20
comparing 1087:23b9a8d315fc with 1088:3fc996d4edd2
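
As a quick orientation before the hunks: the change below replaces the old collect()/collect_locked() pair with a single collect() that routes the request by GCCause. The following is a minimal, self-contained sketch of that routing and of the possibility that an incremental pause is abandoned on account of MMU specs; Cause, PauseKind, choose_pause and start_pause are illustrative names only, not HotSpot code (the real code is in the second hunk).

    // Illustrative model only -- not HotSpot source. See the second hunk for
    // the actual G1CollectedHeap::collect() introduced by this changeset.
    #include <cstdio>

    enum class Cause { scavenge_alot, java_lang_system_gc };
    enum class PauseKind { incremental, full };

    // ScavengeALot now maps to an incremental (possibly partially-young)
    // pause; other explicit requests still map to a full collection.
    static PauseKind choose_pause(Cause c) {
      return (c == Cause::scavenge_alot) ? PauseKind::incremental : PauseKind::full;
    }

    // Simplified stand-in for the MMU check: an incremental pause may be
    // abandoned if the GC time already spent in the current interval leaves
    // no room for another pause.
    static bool start_pause(PauseKind k, double gc_ms_in_interval, double max_gc_ms) {
      if (k == PauseKind::incremental && gc_ms_in_interval >= max_gc_ms)
        return false;               // abandoned on account of MMU specs
      return true;
    }

    int main() {
      PauseKind k = choose_pause(Cause::scavenge_alot);
      std::printf("scavenge_alot -> %s pause, started: %s\n",
                  k == PauseKind::incremental ? "incremental" : "full",
                  start_pause(k, 200.0, 200.0) ? "yes" : "no (abandoned)");
      return 0;
    }
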
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1730,17 +1730,10 @@
     return 0;
   }
   return car->free();
 }
 
-void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-  MutexLocker ml(Heap_lock);
-  collect_locked(cause);
-}
-
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
   GCCauseSetter gcs(this, cause);
   switch (cause) {
@@ -1753,21 +1746,35 @@
     default: // XXX FIX ME
       ShouldNotReachHere(); // Unexpected use of this function
   }
 }
 
-
-void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
-  // Don't want to do a GC until cleanup is completed.
-  wait_for_cleanup_complete();
-
-  // Read the GC count while holding the Heap_lock
-  int gc_count_before = SharedHeap::heap()->total_collections();
-  {
-    MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
-    VM_G1CollectFull op(gc_count_before, cause);
-    VMThread::execute(&op);
+void G1CollectedHeap::collect(GCCause::Cause cause) {
+  // The caller doesn't have the Heap_lock
+  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+
+  int gc_count_before;
+  {
+    MutexLocker ml(Heap_lock);
+    // Read the GC count while holding the Heap_lock
+    gc_count_before = SharedHeap::heap()->total_collections();
+
+    // Don't want to do a GC until cleanup is completed.
+    wait_for_cleanup_complete();
+  } // We give up heap lock; VMThread::execute gets it back below
+  switch (cause) {
+    case GCCause::_scavenge_alot: {
+      // Do an incremental pause, which might sometimes be abandoned.
+      VM_G1IncCollectionPause op(gc_count_before, cause);
+      VMThread::execute(&op);
+      break;
+    }
+    default: {
+      // In all other cases, we currently do a full gc.
+      VM_G1CollectFull op(gc_count_before, cause);
+      VMThread::execute(&op);
+    }
   }
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
   if (_g1_committed.contains(p)) {
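
Note on the hunk above: both the removed collect_locked() and the new collect() read total_collections() while holding the Heap_lock and hand that snapshot to the VM operation; the VM-operation machinery can then skip the request if another collection has already completed in the meantime (that re-check is not part of this hunk). Below is a self-contained sketch of the snapshot-and-recheck idiom, with invented names (GcCoordinator, collect_if_count_unchanged) rather than the real VM_GC_Operation classes.

    #include <mutex>
    #include <cstdio>

    // Invented stand-in for "read the GC count under the lock, then let the
    // operation skip itself if a GC already happened" as used by collect().
    class GcCoordinator {
      std::mutex _heap_lock;
      unsigned   _total_collections = 0;

    public:
      // Equivalent of reading total_collections() while holding the Heap_lock.
      unsigned snapshot_count() {
        std::lock_guard<std::mutex> lg(_heap_lock);
        return _total_collections;
      }

      // Equivalent of the VM operation: only collect if nobody else has
      // completed a collection since the caller took its snapshot.
      bool collect_if_count_unchanged(unsigned count_before) {
        std::lock_guard<std::mutex> lg(_heap_lock);
        if (count_before != _total_collections) {
          return false;             // a collection already served this request
        }
        ++_total_collections;       // "do" the collection
        return true;
      }
    };

    int main() {
      GcCoordinator g;
      unsigned before = g.snapshot_count();
      std::printf("first request collected: %s\n",
                  g.collect_if_count_unchanged(before) ? "yes" : "no");
      std::printf("stale request collected: %s\n",
                  g.collect_if_count_unchanged(before) ? "yes" : "no");
      return 0;
    }
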
@@ -2647,12 +2654,10 @@
         strcat(verbose_str, "(partial)");
     }
     if (g1_policy()->should_initiate_conc_mark())
       strcat(verbose_str, " (initial-mark)");
 
-    GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
-
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
     // is messy if we do.
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
@@ -2800,10 +2805,26 @@
                                          _young_list->first_survivor_region(),
                                          _young_list->last_survivor_region());
         _young_list->reset_auxilary_lists();
       }
     } else {
+      if (_in_cset_fast_test != NULL) {
+        assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
+        FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
+        // this is more for peace of mind; we're nulling them here and
+        // we're expecting them to be null at the beginning of the next GC
+        _in_cset_fast_test = NULL;
+        _in_cset_fast_test_base = NULL;
+      }
+      // This looks confusing, because the DPT should really be empty
+      // at this point -- since we have not done any collection work,
+      // there should not be any derived pointers in the table to update;
+      // however, there is some additional state in the DPT which is
+      // reset at the end of the (null) "gc" here via the following call.
+      // A better approach might be to split off that state resetting work
+      // into a separate method that asserts that the DPT is empty and call
+      // that here. That is deferred for now.
       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
     }
 
     if (evacuation_failed()) {
       _summary_bytes_used = recalculate_used();
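
The long comment in the last hunk defers a cleanup: splitting the DerivedPointerTable's per-GC state reset out of update_pointers() into a method that asserts the table is empty. As a rough, self-contained illustration of that suggested refactoring -- the Table class and its methods below are invented stand-ins, not HotSpot's DerivedPointerTable API:

    #include <cassert>
    #include <vector>

    // Invented stand-in: a table that carries both entries and per-GC state.
    class Table {
      std::vector<int> _entries;   // stand-in for derived-pointer entries
      bool _active = false;        // stand-in for auxiliary per-GC state

    public:
      void set_active() { _active = true; }

      // Today's pattern: updating also resets the auxiliary state, so the
      // abandoned-pause path calls this even though the table is empty.
      void update_entries() {
        for (int& e : _entries) e += 1;   // placeholder "update" work
        _entries.clear();
        _active = false;
      }

      // The suggested refactoring: reset the state only, and assert that
      // there is genuinely nothing to update on this path.
      void reset_state_assuming_empty() {
        assert(_entries.empty() && "no entries expected on an abandoned pause");
        _active = false;
      }

      bool empty() const { return _entries.empty(); }
    };

    int main() {
      Table t;
      t.set_active();                   // pause begins, no evacuation work done
      t.reset_state_assuming_empty();   // abandoned pause: reset state, table must be empty
      return t.empty() ? 0 : 1;
    }
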