comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 640:ba50942c8138

Merge
author tonyp
date Wed, 18 Mar 2009 11:37:48 -0400
parents 7bb995fbd3c0 25e146966e7c
children bd441136a5ce
comparing 629:2581d90c6c9b with 640:ba50942c8138
@@ -784,10 +784,16 @@
     }
     _cur_alloc_region = NULL;
   }
 }
 
+void G1CollectedHeap::abandon_gc_alloc_regions() {
+  // first, make sure that the GC alloc region list is empty (it should be!)
+  assert(_gc_alloc_region_list == NULL, "invariant");
+  release_gc_alloc_regions(true /* totally */);
+}
+
 class PostMCRemSetClearClosure: public HeapRegionClosure {
   ModRefBarrierSet* _mr_bs;
 public:
   PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
   bool doHeapRegion(HeapRegion* r) {
@@ -912,10 +918,11 @@
   // refinement, if any are in progress.
   concurrent_mark()->abort();
 
   // Make sure we'll choose a new allocation region afterwards.
   abandon_cur_alloc_region();
+  abandon_gc_alloc_regions();
   assert(_cur_alloc_region == NULL, "Invariant.");
   g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
   tear_down_region_lists();
   set_used_regions_to_need_zero_fill();
   if (g1_policy()->in_young_gc_mode()) {
@@ -952,10 +959,11 @@
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
   if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
     gclog_or_tty->print(" VerifyAfterGC:");
+    prepare_for_verify();
     Universe::verify(false);
   }
   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
 
   reset_gc_time_stamp();
@@ -1304,11 +1312,11 @@
                          new_mem_size/K);
   }
 }
 
 void G1CollectedHeap::shrink(size_t shrink_bytes) {
-  release_gc_alloc_regions();
+  release_gc_alloc_regions(true /* totally */);
   tear_down_region_lists();  // We will rebuild them in a moment.
   shrink_helper(shrink_bytes);
   rebuild_region_lists();
 }
 
@@ -1343,12 +1351,11 @@
   _unclean_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
   _in_cset_fast_test(NULL),
-  _in_cset_fast_test_base(NULL)
-{
+  _in_cset_fast_test_base(NULL) {
   _g1h = this; // To catch bugs.
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
   }
   int n_queues = MAX2((int)ParallelGCThreads, 1);
@@ -1369,13 +1376,23 @@
     q->initialize();
     _task_queues->register_queue(i, q);
   }
 
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     _gc_alloc_regions[ap]       = NULL;
     _gc_alloc_region_counts[ap] = 0;
+    _retained_gc_alloc_regions[ap] = NULL;
+    // by default, we do not retain a GC alloc region for each ap;
+    // we'll override this, when appropriate, below
+    _retain_gc_alloc_region[ap] = false;
   }
+
+  // We will try to remember the last half-full tenured region we
+  // allocated to at the end of a collection so that we can re-use it
+  // during the next collection.
+  _retain_gc_alloc_region[GCAllocForTenured] = true;
+
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 }
 
 jint G1CollectedHeap::initialize() {
   os::enable_vtime();
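
The two arrays added to the constructor drive a retain-and-reuse scheme: _retain_gc_alloc_region[ap] records whether a purpose may carry its allocation region across pauses, and _retained_gc_alloc_regions[ap] holds the region actually kept between pauses. The following is a minimal sketch of that handshake, using simplified stand-in names (Region, retain_policy, new_region) rather than the real HeapRegion and GCAllocPurpose types:

    #include <cstddef>

    struct Region {
      bool in_cset;   // stand-in for HeapRegion::in_collection_set()
      Region() : in_cset(false) {}
    };

    static const int PurposeCount = 2;   // e.g. 0 = survivor, 1 = tenured
    static bool    retain_policy[PurposeCount] = { false, true };  // tenured only
    static Region* retained[PurposeCount]      = { NULL, NULL };
    static Region* active[PurposeCount]        = { NULL, NULL };

    static Region* new_region() { return new Region(); }

    // Start of a pause: prefer the region retained by the previous pause.
    static void get_gc_alloc_regions_sketch() {
      for (int ap = 0; ap < PurposeCount; ++ap) {
        Region* r = retained[ap];
        retained[ap] = NULL;
        if (r != NULL && r->in_cset)   // unusable: it is being collected
          r = NULL;
        active[ap] = (r != NULL) ? r : new_region();
      }
    }

    // End of a pause: keep the region only if the policy allows it and
    // the caller did not ask for a total release.
    static void release_gc_alloc_regions_sketch(bool totally) {
      for (int ap = 0; ap < PurposeCount; ++ap) {
        Region* r = active[ap];
        active[ap] = NULL;
        if (r != NULL && retain_policy[ap] && !totally)
          retained[ap] = r;   // reused at the start of the next pause
        // (in the real code an empty region instead goes on the free list)
      }
    }
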
@@ -2117,19 +2134,11 @@
   VerifyRegionClosure(bool allow_dirty, bool par = false)
     : _allow_dirty(allow_dirty), _par(par) {}
   bool doHeapRegion(HeapRegion* r) {
     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
               "Should be unclaimed at verify points.");
-    if (r->isHumongous()) {
-      if (r->startsHumongous()) {
-        // Verify the single H object.
-        oop(r->bottom())->verify();
-        size_t word_sz = oop(r->bottom())->size();
-        guarantee(r->top() == r->bottom() + word_sz,
-                  "Only one object in a humongous region");
-      }
-    } else {
+    if (!r->continuesHumongous()) {
       VerifyObjsInRegionClosure not_dead_yet_cl(r);
       r->verify(_allow_dirty);
       r->object_iterate(&not_dead_yet_cl);
       guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
                 "More live objects than counted in last complete marking.");
@@ -2177,10 +2186,11 @@
   G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) :
     AbstractGangTask("Parallel verify task"),
     _g1h(g1h), _allow_dirty(allow_dirty) { }
 
   void work(int worker_i) {
+    HandleMark hm;
     VerifyRegionClosure blk(_allow_dirty, true);
     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
                                           HeapRegion::ParVerifyClaimValue);
   }
 };
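
The HandleMark added at the top of work() gives each verify worker its own handle scope, so handles created while it verifies its chunk of regions are discarded when work() returns instead of accumulating on the worker thread. A rough sketch of the RAII idea behind it, with a hypothetical HandleArea stand-in (the real implementation lives in runtime/handles.hpp):

    #include <cstddef>
    #include <vector>

    struct HandleArea { std::vector<void*> handles; };

    class ScopedHandleMark {          // hypothetical stand-in for HandleMark
      HandleArea& _area;
      size_t      _saved_top;         // top of the handle area at entry
    public:
      explicit ScopedHandleMark(HandleArea& area)
        : _area(area), _saved_top(area.handles.size()) {}
      ~ScopedHandleMark() {           // pop everything created in this scope
        _area.handles.resize(_saved_top);
      }
    };
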
@@ -2642,11 +2652,11 @@
     // We have to wait until now, because we don't want the region to
     // be rescheduled for pop-evac during RS update.
     popular_region->set_popular_pending(false);
   }
 
-  release_gc_alloc_regions();
+  release_gc_alloc_regions(false /* totally */);
 
   cleanup_surviving_young_words();
 
   if (g1_policy()->in_young_gc_mode()) {
     _young_list->reset_sampled_info();
@@ -2695,10 +2705,11 @@
   assert(regions_accounted_for(), "Region leakage.");
 
   if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
     gclog_or_tty->print(" VerifyAfterGC:");
+    prepare_for_verify();
     Universe::verify(false);
   }
 
   if (was_enabled) ref_processor()->enable_discovery();
 
@@ -2733,10 +2744,14 @@
   }
 }
 
 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
+  // make sure we don't call set_gc_alloc_region() multiple times on
+  // the same region
+  assert(r == NULL || !r->is_gc_alloc_region(),
+         "shouldn't already be a GC alloc region");
   HeapWord* original_top = NULL;
   if (r != NULL)
     original_top = r->top();
 
   // We will want to record the used space in r as being there before gc.
@@ -2822,10 +2837,16 @@
 void G1CollectedHeap::forget_alloc_region_list() {
   assert(Thread::current()->is_VM_thread(), "Precondition");
   while (_gc_alloc_region_list != NULL) {
     HeapRegion* r = _gc_alloc_region_list;
     assert(r->is_gc_alloc_region(), "Invariant.");
+    // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
+    // newly allocated data in order to be able to apply deferred updates
+    // before the GC is done for verification purposes (i.e. to allow
+    // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
+    // collection.
+    r->ContiguousSpace::set_saved_mark();
     _gc_alloc_region_list = r->next_gc_alloc_region();
     r->set_next_gc_alloc_region(NULL);
     r->set_is_gc_alloc_region(false);
     if (r->is_survivor()) {
       if (r->is_empty()) {
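
The set_saved_mark() call records the region's current top at hand-off time, which is what lets later code walk just the data allocated during the pause (the deferred updates the comment mentions). A simplified sketch of the saved-mark idea, not the real ContiguousSpace API; word-sized "objects" for brevity:

    #include <cstddef>

    struct Space {
      static const size_t cap = 16;
      long   words[cap];
      size_t top;          // allocation pointer, in words
      size_t saved_mark;   // snapshot of top
      Space() : top(0), saved_mark(0) {}
      void set_saved_mark() { saved_mark = top; }
      void allocate(long v) { if (top < cap) words[top++] = v; }
      // visit only the "newly allocated data" past the saved mark
      template <typename Visitor>
      void iterate_from_saved_mark(Visitor& visit) {
        for (size_t i = saved_mark; i < top; ++i) visit(words[i]);
      }
    };
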
@@ -2849,57 +2870,117 @@
   // TODO: allocation regions check
   return true;
 }
 
 void G1CollectedHeap::get_gc_alloc_regions() {
+  // First, let's check that the GC alloc region list is empty (it should be)
+  assert(_gc_alloc_region_list == NULL, "invariant");
+
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    assert(_gc_alloc_regions[ap] == NULL, "invariant");
+
     // Create new GC alloc regions.
-    HeapRegion* alloc_region = _gc_alloc_regions[ap];
-    // Clear this alloc region, so that in case it turns out to be
-    // unacceptable, we end up with no allocation region, rather than a bad
-    // one.
-    _gc_alloc_regions[ap] = NULL;
-    if (alloc_region == NULL || alloc_region->in_collection_set()) {
-      // Can't re-use old one.  Allocate a new one.
+    HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
+    _retained_gc_alloc_regions[ap] = NULL;
+
+    if (alloc_region != NULL) {
+      assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
+
+      // let's make sure that the GC alloc region is not tagged as such
+      // outside a GC operation
+      assert(!alloc_region->is_gc_alloc_region(), "sanity");
+
+      if (alloc_region->in_collection_set() ||
+          alloc_region->top() == alloc_region->end() ||
+          alloc_region->top() == alloc_region->bottom()) {
+        // we will discard the current GC alloc region if it's in the
+        // collection set (it can happen!), if it's already full (no
+        // point in using it), or if it's empty (this means that it
+        // was emptied during a cleanup and it should be on the free
+        // list now).
+
+        alloc_region = NULL;
+      }
+    }
+
+    if (alloc_region == NULL) {
+      // we will get a new GC alloc region
       alloc_region = newAllocRegionWithExpansion(ap, 0);
     }
+
     if (alloc_region != NULL) {
+      assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
       set_gc_alloc_region(ap, alloc_region);
     }
+
+    assert(_gc_alloc_regions[ap] == NULL ||
+           _gc_alloc_regions[ap]->is_gc_alloc_region(),
+           "the GC alloc region should be tagged as such");
+    assert(_gc_alloc_regions[ap] == NULL ||
+           _gc_alloc_regions[ap] == _gc_alloc_region_list,
+           "the GC alloc region should be the same as the GC alloc list head");
   }
   // Set alternative regions for allocation purposes that have reached
-  // thier limit.
+  // their limit.
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
     if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
       _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
     }
   }
   assert(check_gc_alloc_regions(), "alloc regions messed up");
 }
 
-void G1CollectedHeap::release_gc_alloc_regions() {
+void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
   // We keep a separate list of all regions that have been alloc regions in
-  // the current collection pause. Forget that now.
+  // the current collection pause. Forget that now. This method will
+  // untag the GC alloc regions and tear down the GC alloc region
+  // list. It's desirable that no regions are tagged as GC alloc
+  // outside GCs.
   forget_alloc_region_list();
 
   // The current alloc regions contain objs that have survived
   // collection. Make them no longer GC alloc regions.
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     HeapRegion* r = _gc_alloc_regions[ap];
-    if (r != NULL && r->is_empty()) {
-      {
+    _retained_gc_alloc_regions[ap] = NULL;
+
+    if (r != NULL) {
+      // we retain nothing on _gc_alloc_regions between GCs
+      set_gc_alloc_region(ap, NULL);
+      _gc_alloc_region_counts[ap] = 0;
+
+      if (r->is_empty()) {
+        // we didn't actually allocate anything in it; let's just put
+        // it on the free list
         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
         r->set_zero_fill_complete();
         put_free_region_on_list_locked(r);
+      } else if (_retain_gc_alloc_region[ap] && !totally) {
+        // retain it so that we can use it at the beginning of the next GC
+        _retained_gc_alloc_regions[ap] = r;
       }
     }
-    // set_gc_alloc_region will also NULLify all aliases to the region
-    set_gc_alloc_region(ap, NULL);
-    _gc_alloc_region_counts[ap] = 0;
   }
 }
+
+#ifndef PRODUCT
+// Useful for debugging
+
+void G1CollectedHeap::print_gc_alloc_regions() {
+  gclog_or_tty->print_cr("GC alloc regions");
+  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    HeapRegion* r = _gc_alloc_regions[ap];
+    if (r == NULL) {
+      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
+    } else {
+      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
+                             ap, r->bottom(), r->used());
+    }
+  }
+}
+#endif // PRODUCT
 
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
   _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
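
Taken together: release_gc_alloc_regions(false /* totally */) runs at the end of a normal evacuation pause and may retain a half-full tenured region, while the totally form (called from shrink() and, via abandon_gc_alloc_regions(), around full collections) drops everything. The three conditions under which get_gc_alloc_regions() discards a retained region can be restated as a predicate; the struct below is a hypothetical stand-in for the queried HeapRegion properties, not a HotSpot API:

    struct R {
      bool in_collection_set;
      bool full;    // top() == end(): no space left to allocate into
      bool empty;   // top() == bottom(): emptied by cleanup, belongs on the free list
    };

    static bool should_discard_retained_region(const R& r) {
      return r.in_collection_set   // about to be evacuated ("it can happen!")
          || r.full                // no point in using it
          || r.empty;              // should be handed back via the free list
    }
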
@@ -3656,11 +3737,13 @@
 
   DirtyCardQueue& dirty_card_queue() { return _dcq; }
   CardTableModRefBS* ctbs()          { return _ct_bs; }
 
   void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
-    _g1_rem->par_write_ref(from, p, tid);
+    if (!from->is_survivor()) {
+      _g1_rem->par_write_ref(from, p, tid);
+    }
   }
 
   void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
     // If the new value of the field points to the same region or
     // is the to-space, we don't need to include it in the Rset updates.
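
The survivor test added to immediate_rs_update() presumably exists because remembered-set work for references originating in survivor regions is handled after the pause rather than being applied immediately; the deferred path above likewise filters what it records. A compact sketch of the immediate/deferred split, with stand-in types rather than the real G1RemSet and DirtyCardQueue interfaces:

    struct Region { bool is_survivor; };

    struct RSetUpdater {
      void write_ref(Region* /*from*/, void** /*p*/) { /* record in the RS now */ }
      void enqueue_card(void** /*p*/)                { /* defer via a dirty card queue */ }

      void immediate_update(Region* from, void** p) {
        if (!from->is_survivor)   // survivor-origin updates are not applied now
          write_ref(from, p);
      }
      void deferred_update(void** p) {
        enqueue_card(p);          // processed once the pause is over
      }
    };
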