comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 636:6c4cea9bfa11

6604422: G1: re-use half-promoted regions
6728271: G1: Cleanup G1CollectedHeap::get_gc_alloc_regions()
Summary: It allows the last half-full region that was allocated to during a GC to be reused during the next GC.
Reviewed-by: apetrusenko, jcoomes
author tonyp
date Sun, 15 Mar 2009 22:03:38 -0400
parents 87fa6e083d82
children 25e146966e7c
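
As a rough, hedged illustration of the pattern this changeset introduces (retain the last half-full GC alloc region for the tenured purpose and try to hand it back at the start of the next collection, unless the release is a "total" one), here is a minimal standalone C++ sketch. It is not HotSpot code: Region, AllocRegionCache, acquire() and release() are invented names, and the totally flag merely mirrors the new release_gc_alloc_regions(bool totally) parameter.

// Minimal standalone sketch (not HotSpot code) of retaining a half-full
// allocation region between collections and reusing it in the next one.
#include <cstddef>
#include <cassert>

struct Region {
  size_t capacity;
  size_t used;
  bool   in_collection_set;
  bool   is_empty() const { return used == 0; }
  bool   is_full()  const { return used == capacity; }
};

class AllocRegionCache {
  Region* _retained;       // region kept alive between collections
  bool    _retain_policy;  // whether retaining is enabled for this purpose
public:
  AllocRegionCache(bool retain) : _retained(NULL), _retain_policy(retain) {}

  // Called when a collection starts: hand back a usable retained region,
  // or NULL if a fresh one has to be allocated.
  Region* acquire() {
    Region* r = _retained;
    _retained = NULL;
    if (r != NULL &&
        (r->in_collection_set || r->is_full() || r->is_empty())) {
      // Unusable: it is being collected, has no space left, or was
      // emptied (and is presumably back on a free list).
      r = NULL;
    }
    return r;
  }

  // Called when a collection ends ("totally" forces a full release,
  // e.g. before a full GC or a heap shrink).
  void release(Region* r, bool totally) {
    if (r != NULL && !r->is_empty() && _retain_policy && !totally) {
      _retained = r;  // keep it for the next collection
    }
  }
};

int main() {
  AllocRegionCache tenured_cache(true /* retain */);
  Region r = { 1024, 512, false };          // half-full region

  tenured_cache.release(&r, false /* totally */);
  assert(tenured_cache.acquire() == &r);    // reused by the next GC

  tenured_cache.release(&r, true /* totally */);
  assert(tenured_cache.acquire() == NULL);  // totally released, not reused
  return 0;
}

In the actual change, the same decision is spread across get_gc_alloc_regions(), release_gc_alloc_regions(bool totally) and the new _retained_gc_alloc_regions / _retain_gc_alloc_region fields, as the hunks below show.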
--- src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	635:fe2441500281
+++ src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	636:6c4cea9bfa11
@@ -784,10 +784,16 @@
     }
     _cur_alloc_region = NULL;
   }
 }
 
+void G1CollectedHeap::abandon_gc_alloc_regions() {
+  // first, make sure that the GC alloc region list is empty (it should!)
+  assert(_gc_alloc_region_list == NULL, "invariant");
+  release_gc_alloc_regions(true /* totally */);
+}
+
 class PostMCRemSetClearClosure: public HeapRegionClosure {
   ModRefBarrierSet* _mr_bs;
 public:
   PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
   bool doHeapRegion(HeapRegion* r) {
@@ -912,10 +918,11 @@
     // refinement, if any are in progress.
     concurrent_mark()->abort();
 
     // Make sure we'll choose a new allocation region afterwards.
     abandon_cur_alloc_region();
+    abandon_gc_alloc_regions();
     assert(_cur_alloc_region == NULL, "Invariant.");
     g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
     tear_down_region_lists();
     set_used_regions_to_need_zero_fill();
     if (g1_policy()->in_young_gc_mode()) {
@@ -1304,11 +1311,11 @@
                            new_mem_size/K);
   }
 }
 
 void G1CollectedHeap::shrink(size_t shrink_bytes) {
-  release_gc_alloc_regions();
+  release_gc_alloc_regions(true /* totally */);
   tear_down_region_lists(); // We will rebuild them in a moment.
   shrink_helper(shrink_bytes);
   rebuild_region_lists();
 }
 
@@ -1343,12 +1350,11 @@
   _unclean_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
   _in_cset_fast_test(NULL),
-  _in_cset_fast_test_base(NULL)
-{
+  _in_cset_fast_test_base(NULL) {
   _g1h = this; // To catch bugs.
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
   }
   int n_queues = MAX2((int)ParallelGCThreads, 1);
@@ -1369,13 +1375,23 @@
     q->initialize();
     _task_queues->register_queue(i, q);
   }
 
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     _gc_alloc_regions[ap] = NULL;
     _gc_alloc_region_counts[ap] = 0;
-  }
+    _retained_gc_alloc_regions[ap] = NULL;
+    // by default, we do not retain a GC alloc region for each ap;
+    // we'll override this, when appropriate, below
+    _retain_gc_alloc_region[ap] = false;
+  }
+
+  // We will try to remember the last half-full tenured region we
+  // allocated to at the end of a collection so that we can re-use it
+  // during the next collection.
+  _retain_gc_alloc_region[GCAllocForTenured] = true;
+
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 }
 
 jint G1CollectedHeap::initialize() {
   os::enable_vtime();
@@ -2642,11 +2658,11 @@
       // We have to wait until now, because we don't want the region to
       // be rescheduled for pop-evac during RS update.
       popular_region->set_popular_pending(false);
     }
 
-    release_gc_alloc_regions();
+    release_gc_alloc_regions(false /* totally */);
 
     cleanup_surviving_young_words();
 
     if (g1_policy()->in_young_gc_mode()) {
       _young_list->reset_sampled_info();
@@ -2733,10 +2749,14 @@
   }
 }
 
 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
+  // make sure we don't call set_gc_alloc_region() multiple times on
+  // the same region
+  assert(r == NULL || !r->is_gc_alloc_region(),
+         "shouldn't already be a GC alloc region");
   HeapWord* original_top = NULL;
   if (r != NULL)
     original_top = r->top();
 
   // We will want to record the used space in r as being there before gc.
@@ -2849,57 +2869,117 @@
   // TODO: allocation regions check
   return true;
 }
 
 void G1CollectedHeap::get_gc_alloc_regions() {
+  // First, let's check that the GC alloc region list is empty (it should)
+  assert(_gc_alloc_region_list == NULL, "invariant");
+
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    assert(_gc_alloc_regions[ap] == NULL, "invariant");
+
     // Create new GC alloc regions.
-    HeapRegion* alloc_region = _gc_alloc_regions[ap];
-    // Clear this alloc region, so that in case it turns out to be
-    // unacceptable, we end up with no allocation region, rather than a bad
-    // one.
-    _gc_alloc_regions[ap] = NULL;
-    if (alloc_region == NULL || alloc_region->in_collection_set()) {
-      // Can't re-use old one. Allocate a new one.
+    HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
+    _retained_gc_alloc_regions[ap] = NULL;
+
+    if (alloc_region != NULL) {
+      assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
+
+      // let's make sure that the GC alloc region is not tagged as such
+      // outside a GC operation
+      assert(!alloc_region->is_gc_alloc_region(), "sanity");
+
+      if (alloc_region->in_collection_set() ||
+          alloc_region->top() == alloc_region->end() ||
+          alloc_region->top() == alloc_region->bottom()) {
+        // we will discard the current GC alloc region if it's in the
+        // collection set (it can happen!), if it's already full (no
+        // point in using it), or if it's empty (this means that it
+        // was emptied during a cleanup and it should be on the free
+        // list now).
+
+        alloc_region = NULL;
+      }
+    }
+
+    if (alloc_region == NULL) {
+      // we will get a new GC alloc region
       alloc_region = newAllocRegionWithExpansion(ap, 0);
     }
+
     if (alloc_region != NULL) {
+      assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
       set_gc_alloc_region(ap, alloc_region);
     }
+
+    assert(_gc_alloc_regions[ap] == NULL ||
+           _gc_alloc_regions[ap]->is_gc_alloc_region(),
+           "the GC alloc region should be tagged as such");
+    assert(_gc_alloc_regions[ap] == NULL ||
+           _gc_alloc_regions[ap] == _gc_alloc_region_list,
+           "the GC alloc region should be the same as the GC alloc list head");
   }
   // Set alternative regions for allocation purposes that have reached
-  // thier limit.
+  // their limit.
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
     if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
       _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
     }
   }
   assert(check_gc_alloc_regions(), "alloc regions messed up");
 }
 
-void G1CollectedHeap::release_gc_alloc_regions() {
+void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
   // We keep a separate list of all regions that have been alloc regions in
-  // the current collection pause. Forget that now.
+  // the current collection pause. Forget that now. This method will
+  // untag the GC alloc regions and tear down the GC alloc region
+  // list. It's desirable that no regions are tagged as GC alloc
+  // outside GCs.
   forget_alloc_region_list();
 
   // The current alloc regions contain objs that have survived
   // collection. Make them no longer GC alloc regions.
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     HeapRegion* r = _gc_alloc_regions[ap];
-    if (r != NULL && r->is_empty()) {
-      {
+    _retained_gc_alloc_regions[ap] = NULL;
+
+    if (r != NULL) {
+      // we retain nothing on _gc_alloc_regions between GCs
+      set_gc_alloc_region(ap, NULL);
+      _gc_alloc_region_counts[ap] = 0;
+
+      if (r->is_empty()) {
+        // we didn't actually allocate anything in it; let's just put
+        // it on the free list
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        r->set_zero_fill_complete();
        put_free_region_on_list_locked(r);
+      } else if (_retain_gc_alloc_region[ap] && !totally) {
+        // retain it so that we can use it at the beginning of the next GC
+        _retained_gc_alloc_regions[ap] = r;
      }
     }
-    // set_gc_alloc_region will also NULLify all aliases to the region
-    set_gc_alloc_region(ap, NULL);
-    _gc_alloc_region_counts[ap] = 0;
   }
 }
+
+#ifndef PRODUCT
+// Useful for debugging
+
+void G1CollectedHeap::print_gc_alloc_regions() {
+  gclog_or_tty->print_cr("GC alloc regions");
+  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    HeapRegion* r = _gc_alloc_regions[ap];
+    if (r == NULL) {
+      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
+    } else {
+      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
+                             ap, r->bottom(), r->used());
    }
   }
 }
+#endif // PRODUCT
 
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
   _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);