comparison src/share/vm/memory/defNewGeneration.cpp @ 1888:a7214d79fcf1

6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
Summary: Deprecated HandlePromotionFailure (removing the ability to turn off that feature) and did away with the one-epoch look-ahead when deciding whether a scavenge is likely to fail, relying instead on current data.
Reviewed-by: jmasa, johnc, poonam
author ysr
date Sat, 23 Oct 2010 23:03:49 -0700
parents 894b1d7c7e01
children c766bae6c14d
comparing 1887:cd3ef3fd20dd with 1888:a7214d79fcf1
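
In essence, the change replaces the speculative one-epoch look-ahead in DefNewGeneration::collection_attempt_is_safe() with a direct query against the young generation's current occupancy, and it replaces the heap-level "incremental collection will fail" hint with an "incremental collection failed" flag. The before/after sketch below is condensed from the hunks that follow; the lazy lookup of _next_gen and the surrounding asserts are elided, so treat it as illustrative rather than a verbatim excerpt.

// Before (1887:cd3ef3fd20dd): derive a worst-case evacuation volume from
// MaxLiveObjectEvacuationRatio and let HandlePromotionFailure soften the answer.
bool DefNewGeneration::collection_attempt_is_safe() {
  // ... lazy lookup of _next_gen elided ...
  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
                                              HandlePromotionFailure);
}

// After (1888:a7214d79fcf1): ask the next generation directly whether promoting
// the current occupancy of this generation would be safe.
bool DefNewGeneration::collection_attempt_is_safe() {
  // ... lazy lookup of _next_gen elided ...
  return _next_gen->promotion_attempt_is_safe(used());
}

With HandlePromotionFailure gone, the vm_exit_out_of_memory() escape hatch in the promotion slow path is removed as well, and gc_epilogue() now sets or clears the heap's incremental_collection_failed flag based on collection_attempt_is_safe() after a full collection (last hunk below).
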
@@ -508,11 +508,11 @@
 
   // If the next generation is too full to accomodate promotion
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 
   init_assuming_no_promotion_failure();
@@ -594,13 +594,12 @@
     AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
     size_policy->reset_gc_overhead_limit_count();
     if (PrintGC && !PrintGCDetails) {
       gch->print_heap_change(gch_prev_used);
     }
+    assert(!gch->incremental_collection_failed(), "Should be clear");
   } else {
-    assert(HandlePromotionFailure,
-           "Should not be here unless promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
     remove_forwarding_pointers();
     if (PrintGCDetails) {
@@ -611,11 +610,11 @@
     // case there can be live objects in to-space
     // as a result of a partial evacuation of eden
     // and from-space.
     swap_spaces(); // For uniformity wrt ParNewGeneration.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
 
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
 
     // Reset the PromotionFailureALot counters.
@@ -698,16 +697,10 @@
 
   // Otherwise try allocating obj tenured
   if (obj == NULL) {
     obj = _next_gen->promote(old, s);
     if (obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-        // is incorrectly set. In any case, its seriously wrong to be here!
-        vm_exit_out_of_memory(s*wordSize, "promotion");
-      }
-
       handle_promotion_failure(old);
       return old;
     }
   } else {
     // Prefetch beyond obj
@@ -810,51 +803,30 @@
     GenCollectedHeap* gch = GenCollectedHeap::heap();
     _next_gen = gch->next_gen(this);
     assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
   }
-
-  // Decide if there's enough room for a full promotion
-  // When using extremely large edens, we effectively lose a
-  // large amount of old space. Use the "MaxLiveObjectEvacuationRatio"
-  // flag to reduce the minimum evacuation space requirements. If
-  // there is not enough space to evacuate eden during a scavenge,
-  // the VM will immediately exit with an out of memory error.
-  // This flag has not been tested
-  // with collectors other than simple mark & sweep.
-  //
-  // Note that with the addition of promotion failure handling, the
-  // VM will not immediately exit but will undo the young generation
-  // collection. The parameter is left here for compatibility.
-  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
-
-  // worst_case_evacuation is based on "used()". For the case where this
-  // method is called after a collection, this is still appropriate because
-  // the case that needs to be detected is one in which a full collection
-  // has been done and has overflowed into the young generation. In that
-  // case a minor collection will fail (the overflow of the full collection
-  // means there is no space in the old generation for any promotion).
-  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
-
-  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
-                                              HandlePromotionFailure);
+  return _next_gen->promotion_attempt_is_safe(used());
 }
 
 void DefNewGeneration::gc_epilogue(bool full) {
   // Check if the heap is approaching full after a collection has
   // been done. Generally the young generation is empty at
   // a minimum at the end of a collection. If it is not, then
   // the heap is approaching full.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  clear_should_allocate_from_space();
-  if (collection_attempt_is_safe()) {
-    gch->clear_incremental_collection_will_fail();
+  if (full) {
+    assert(!GC_locker::is_active(), "We should not be executing here");
+    if (!collection_attempt_is_safe()) {
+      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
+      set_should_allocate_from_space(); // we seem to be running out of space
+    } else {
+      gch->clear_incremental_collection_failed(); // We just did a full collection
+      clear_should_allocate_from_space(); // if set
+    }
   } else {
-    gch->set_incremental_collection_will_fail();
-    if (full) { // we seem to be running out of space
-      set_should_allocate_from_space();
-    }
+    assert(!gch->incremental_collection_failed(), "Error");
   }
 
   if (ZapUnusedHeapArea) {
     eden()->check_mangled_unused_area_complete();
     from()->check_mangled_unused_area_complete();