comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 4787:2ace1c4ee8da

6888336: G1: avoid explicitly marking and pushing objects in survivor spaces
Summary: This change simplifies the interaction between GC and concurrent marking. By disabling survivor spaces during the initial-mark pause we don't need to propagate marks of objects we copy during each GC (since we never need to copy an explicitly marked object).
Reviewed-by: johnc, brutisso
author tonyp
date Tue, 10 Jan 2012 18:58:13 -0500
parents 023652e49ac0
children 2e966d967c5c
comparing 4786:1d6185f732aa with 4787:2ace1c4ee8da
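
The decision the summary describes — not using survivor regions for the pause that starts a marking cycle — is what makes the rest of the patch possible: nothing evacuated during that pause can already carry an explicit mark, so the copy path never has to re-mark or push the new location of an object. A conceptual sketch of that policy decision follows; the function name and call site are illustrative, not the actual G1CollectorPolicy code:

    // Illustrative only: encodes the policy decision described in the summary,
    // not the real G1CollectorPolicy code.
    static bool hypothetical_use_survivor_regions(bool during_initial_mark_pause) {
      // Survivor regions are disabled for the initial-mark pause, so every
      // object evacuated in that pause is tenured and none of them can be
      // explicitly marked yet; the pause therefore never needs to propagate
      // marks or push copied objects for concurrent marking.
      return !during_initial_mark_pause;
    }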
@@ -29,10 +29,11 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
@@ -181,16 +182,15 @@
   {}
 
 void CMMarkStack::allocate(size_t size) {
   _base = NEW_C_HEAP_ARRAY(oop, size);
   if (_base == NULL) {
-    vm_exit_during_initialization("Failed to allocate "
-                                  "CM region mark stack");
+    vm_exit_during_initialization("Failed to allocate CM region mark stack");
   }
   _index = 0;
   _capacity = (jint) size;
-  _oops_do_bound = -1;
+  _saved_index = -1;
   NOT_PRODUCT(_max_depth = 0);
 }
 
 CMMarkStack::~CMMarkStack() {
   if (_base != NULL) {
@@ -281,11 +281,10 @@
     *n = k;
     return true;
   }
 }
 
-
 CMRegionStack::CMRegionStack() : _base(NULL) {}
 
 void CMRegionStack::allocate(size_t size) {
   _base = NEW_C_HEAP_ARRAY(MemRegion, size);
   if (_base == NULL) {
@@ -300,10 +299,12 @@
     FREE_C_HEAP_ARRAY(oop, _base);
   }
 }
 
 void CMRegionStack::push_lock_free(MemRegion mr) {
+  guarantee(false, "push_lock_free(): don't call this any more");
+
   assert(mr.word_size() > 0, "Precondition");
   while (true) {
     jint index = _index;
 
     if (index >= _capacity) {
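
This hunk introduces the pattern used throughout the change for retiring the region-stack and related pause-time machinery: instead of deleting a body outright, each obsolete entry point gets a leading guarantee(false, ...) so any caller that was missed fails fast, in product builds as well as debug builds. A generic, hypothetical illustration of that pattern (not code from this file):

    // Hypothetical example of the fail-fast retirement pattern used above:
    // keep the old body for reviewability, but crash if anything still calls it.
    void LegacyRegionStack::push(MemRegion mr) {
      guarantee(false, "push(): don't call this any more");

      // Old body, now unreachable.
      assert(mr.word_size() > 0, "Precondition");
      // ...
    }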
@@ -323,10 +324,12 @@
 
 // Lock-free pop of the region stack. Called during the concurrent
 // marking / remark phases. Should only be called in tandem with
 // other lock-free pops.
 MemRegion CMRegionStack::pop_lock_free() {
+  guarantee(false, "pop_lock_free(): don't call this any more");
+
   while (true) {
     jint index = _index;
 
     if (index == 0) {
       return MemRegion();
@@ -388,10 +391,12 @@
   }
 }
 #endif
 
 bool CMRegionStack::invalidate_entries_into_cset() {
+  guarantee(false, "invalidate_entries_into_cset(): don't call this any more");
+
   bool result = false;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   for (int i = 0; i < _oops_do_bound; ++i) {
     MemRegion mr = _base[i];
     if (mr.start() != NULL) {
@@ -436,18 +441,33 @@
   }
   debug_only(_drain_in_progress = false);
   return res;
 }
 
+void CMMarkStack::note_start_of_gc() {
+  assert(_saved_index == -1,
+         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
+  _saved_index = _index;
+}
+
+void CMMarkStack::note_end_of_gc() {
+  // This is intentionally a guarantee, instead of an assert. If we
+  // accidentally add something to the mark stack during GC, it
+  // will be a correctness issue so it's better if we crash. we'll
+  // only check this once per GC anyway, so it won't be a performance
+  // issue in any way.
+  guarantee(_saved_index == _index,
+            err_msg("saved index: %d index: %d", _saved_index, _index));
+  _saved_index = -1;
+}
+
 void CMMarkStack::oops_do(OopClosure* f) {
-  if (_index == 0) return;
-  assert(_oops_do_bound != -1 && _oops_do_bound <= _index,
-         "Bound must be set.");
-  for (int i = 0; i < _oops_do_bound; i++) {
+  assert(_saved_index == _index,
+         err_msg("saved index: %d index: %d", _saved_index, _index));
+  for (int i = 0; i < _index; i += 1) {
     f->do_oop(&_base[i]);
   }
-  _oops_do_bound = -1;
 }
 
 bool ConcurrentMark::not_yet_marked(oop obj) const {
   return (_g1h->is_obj_ill(obj)
       || (_g1h->is_in_permanent(obj)
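
The new _saved_index field (which replaces _oops_do_bound) lets the mark stack verify that a pause did not grow it: note_start_of_gc() snapshots the current index, oops_do() asserts the snapshot still matches before walking the entries, and note_end_of_gc() guarantees the index is unchanged and resets the snapshot. A minimal sketch of the intended bracketing; the driver function below is hypothetical, the real callers live in the pause code:

    // Hypothetical driver showing the note_start_of_gc()/note_end_of_gc() bracket.
    void hypothetical_pause_bracket(CMMarkStack* stack, OopClosure* root_cl) {
      stack->note_start_of_gc();  // records _index in _saved_index

      // The pause may visit the stack's entries (e.g. as roots) ...
      stack->oops_do(root_cl);    // asserts _saved_index == _index first

      // ... but must not push anything onto it, so the index is unchanged:
      stack->note_end_of_gc();    // guarantee(_saved_index == _index), reset to -1
    }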
@@ -781,11 +801,11 @@
 
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
     if (!r->continuesHumongous()) {
-      r->note_start_of_marking(true);
+      r->note_start_of_marking();
     }
     return false;
   }
 };
 
@@ -802,10 +822,14 @@
   }
 #endif
 
   // Initialise marking structures. This has to be done in a STW phase.
   reset();
+
+  // For each region note start of marking.
+  NoteStartOfMarkHRClosure startcl;
+  g1h->heap_region_iterate(&startcl);
 }
 
 
 void ConcurrentMark::checkpointRootsInitialPost() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -815,14 +839,10 @@
   // force an oveflow during remark we'll never actually complete the
   // marking phase. So, we initilize this here, at the start of the
   // cycle, so that at the remaining overflow number will decrease at
   // every remark and we'll eventually not need to cause one.
   force_overflow_stw()->init();
-
-  // For each region note start of marking.
-  NoteStartOfMarkHRClosure startcl;
-  g1h->heap_region_iterate(&startcl);
 
   // Start Concurrent Marking weak-reference discovery.
   ReferenceProcessor* rp = g1h->ref_processor_cm();
   // enable ("weak") refs discovery
   rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
@@ -944,26 +964,13 @@
     return false;
   }
 }
 #endif // !PRODUCT
 
-void ConcurrentMark::grayRoot(oop p) {
-  HeapWord* addr = (HeapWord*) p;
-  // We can't really check against _heap_start and _heap_end, since it
-  // is possible during an evacuation pause with piggy-backed
-  // initial-mark that the committed space is expanded during the
-  // pause without CM observing this change. So the assertions below
-  // is a bit conservative; but better than nothing.
-  assert(_g1h->g1_committed().contains(addr),
-         "address should be within the heap bounds");
-
-  if (!_nextMarkBitMap->isMarked(addr)) {
-    _nextMarkBitMap->parMark(addr);
-  }
-}
-
 void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
+  guarantee(false, "grayRegionIfNecessary(): don't call this any more");
+
   // The objects on the region have already been marked "in bulk" by
   // the caller. We only need to decide whether to push the region on
   // the region stack or not.
 
   if (!concurrent_marking_in_progress() || !_should_gray_objects) {
@@ -1005,10 +1012,12 @@
     }
   }
 }
 
 void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) {
+  guarantee(false, "markAndGrayObjectIfNecessary(): don't call this any more");
+
   // The object is not marked by the caller. We need to at least mark
   // it and maybe push in on the stack.
 
   HeapWord* addr = (HeapWord*)p;
   if (!_nextMarkBitMap->isMarked(addr)) {
@@ -1222,11 +1231,10 @@
   // threads to have SATB queues with active set to true.
   satb_mq_set.set_active_all_threads(false, /* new active value */
                                      true /* expected_active */);
 
   if (VerifyDuringGC) {
-
     HandleMark hm; // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(after)");
     Universe::heap()->prepare_for_verify();
     Universe::verify(/* allow dirty */ true,
                      /* silent */ false,
@@ -1877,14 +1885,10 @@
 
   // Statistics.
   double end = os::elapsedTime();
   _cleanup_times.add((end - start) * 1000.0);
 
-  // G1CollectedHeap::heap()->print();
-  // gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d",
-  // G1CollectedHeap::heap()->get_gc_time_stamp());
-
   if (PrintGC || PrintGCDetails) {
     g1h->print_size_transition(gclog_or_tty,
                                start_used_bytes,
                                g1h->used(),
                                g1h->capacity());
@@ -2667,10 +2671,12 @@
     }
   }
 }
 
 void ConcurrentMark::drainAllSATBBuffers() {
+  guarantee(false, "drainAllSATBBuffers(): don't call this any more");
+
   CMGlobalObjectClosure oc(this);
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   satb_mq_set.set_closure(&oc);
 
   while (satb_mq_set.apply_closure_to_completed_buffer()) {
@@ -2683,16 +2689,10 @@
   // called during an evacuation pause
   satb_mq_set.iterate_closure_all_threads();
 
   satb_mq_set.set_closure(NULL);
   assert(satb_mq_set.completed_buffers_num() == 0, "invariant");
-}
-
-void ConcurrentMark::markPrev(oop p) {
-  // Note we are overriding the read-only view of the prev map here, via
-  // the cast.
-  ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*)p);
 }
 
 void ConcurrentMark::clear(oop p) {
   assert(p != NULL && p->is_oop(), "expected an oop");
   HeapWord* addr = (HeapWord*)p;
@@ -2700,15 +2700,23 @@
          addr < _nextMarkBitMap->endWord(), "in a region");
 
   _nextMarkBitMap->clear(addr);
 }
 
-void ConcurrentMark::clearRangeBothMaps(MemRegion mr) {
+void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
   // Note we are overriding the read-only view of the prev map here, via
   // the cast.
   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
+}
+
+void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
   _nextMarkBitMap->clearRange(mr);
+}
+
+void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
+  clearRangePrevBitmap(mr);
+  clearRangeNextBitmap(mr);
 }
 
 HeapRegion*
 ConcurrentMark::claim_region(int task_num) {
   // "checkpoint" the finger
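
clearRangeBothMaps() is split here into clearRangePrevBitmap() and clearRangeNextBitmap(), with clearRangeBothBitmaps() composing the two, so callers that only need to clear one bitmap no longer have to touch both. A hedged usage sketch; the calling context and helper name are assumptions:

    // Hypothetical caller: clear marks covering a region, either in the prev
    // bitmap only or in both bitmaps, using the new fine-grained entry points.
    void hypothetical_clear_region_marks(ConcurrentMark* cm, HeapRegion* hr, bool prev_only) {
      MemRegion mr(hr->bottom(), hr->end());
      if (prev_only) {
        cm->clearRangePrevBitmap(mr);   // touches only the prev bitmap
      } else {
        cm->clearRangeBothBitmaps(mr);  // prev + next, as clearRangeBothMaps() did
      }
    }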
@@ -2801,10 +2809,13 @@
 
   return NULL;
 }
 
 bool ConcurrentMark::invalidate_aborted_regions_in_cset() {
+  guarantee(false, "invalidate_aborted_regions_in_cset(): "
+            "don't call this any more");
+
   bool result = false;
   for (int i = 0; i < (int)_max_task_num; ++i) {
     CMTask* the_task = _tasks[i];
     MemRegion mr = the_task->aborted_region();
     if (mr.start() != NULL) {
@@ -2852,28 +2863,139 @@
     }
 
     // ...then over the contents of the all the task queues.
     queue->oops_do(cl);
   }
-
-  // Invalidate any entries, that are in the region stack, that
-  // point into the collection set
-  if (_regionStack.invalidate_entries_into_cset()) {
-    // otherwise, any gray objects copied during the evacuation pause
-    // might not be visited.
-    assert(_should_gray_objects, "invariant");
-  }
-
-  // Invalidate any aborted regions, recorded in the individual CM
-  // tasks, that point into the collection set.
-  if (invalidate_aborted_regions_in_cset()) {
-    // otherwise, any gray objects copied during the evacuation pause
-    // might not be visited.
-    assert(_should_gray_objects, "invariant");
-  }
-
-}
+}
+
+#ifndef PRODUCT
+enum VerifyNoCSetOopsPhase {
+  VerifyNoCSetOopsStack,
+  VerifyNoCSetOopsQueues,
+  VerifyNoCSetOopsSATBCompleted,
+  VerifyNoCSetOopsSATBThread
+};
+
+class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
+private:
+  G1CollectedHeap* _g1h;
+  VerifyNoCSetOopsPhase _phase;
+  int _info;
+
+  const char* phase_str() {
+    switch (_phase) {
+    case VerifyNoCSetOopsStack:         return "Stack";
+    case VerifyNoCSetOopsQueues:        return "Queue";
+    case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
+    case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
+    default:                            ShouldNotReachHere();
+    }
+    return NULL;
+  }
+
+  void do_object_work(oop obj) {
+    guarantee(!_g1h->obj_in_cs(obj),
+              err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
+                      (void*) obj, phase_str(), _info));
+  }
+
+public:
+  VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
+
+  void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
+    _phase = phase;
+    _info = info;
+  }
+
+  virtual void do_oop(oop* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    do_object_work(obj);
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    // We should not come across narrow oops while scanning marking
+    // stacks and SATB buffers.
+    ShouldNotReachHere();
+  }
+
+  virtual void do_object(oop obj) {
+    do_object_work(obj);
+  }
+};
+
+void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
+                                         bool verify_enqueued_buffers,
+                                         bool verify_thread_buffers,
+                                         bool verify_fingers) {
+  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
+  if (!G1CollectedHeap::heap()->mark_in_progress()) {
+    return;
+  }
+
+  VerifyNoCSetOopsClosure cl;
+
+  if (verify_stacks) {
+    // Verify entries on the global mark stack
+    cl.set_phase(VerifyNoCSetOopsStack);
+    _markStack.oops_do(&cl);
+
+    // Verify entries on the task queues
+    for (int i = 0; i < (int) _max_task_num; i += 1) {
+      cl.set_phase(VerifyNoCSetOopsQueues, i);
+      OopTaskQueue* queue = _task_queues->queue(i);
+      queue->oops_do(&cl);
+    }
+  }
+
+  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
+
+  // Verify entries on the enqueued SATB buffers
+  if (verify_enqueued_buffers) {
+    cl.set_phase(VerifyNoCSetOopsSATBCompleted);
+    satb_qs.iterate_completed_buffers_read_only(&cl);
+  }
+
+  // Verify entries on the per-thread SATB buffers
+  if (verify_thread_buffers) {
+    cl.set_phase(VerifyNoCSetOopsSATBThread);
+    satb_qs.iterate_thread_buffers_read_only(&cl);
+  }
+
+  if (verify_fingers) {
+    // Verify the global finger
+    HeapWord* global_finger = finger();
+    if (global_finger != NULL && global_finger < _heap_end) {
+      // The global finger always points to a heap region boundary. We
+      // use heap_region_containing_raw() to get the containing region
+      // given that the global finger could be pointing to a free region
+      // which subsequently becomes continues humongous. If that
+      // happens, heap_region_containing() will return the bottom of the
+      // corresponding starts humongous region and the check below will
+      // not hold any more.
+      HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
+      guarantee(global_finger == global_hr->bottom(),
+                err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
+                        global_finger, HR_FORMAT_PARAMS(global_hr)));
+    }
+
+    // Verify the task fingers
+    assert(parallel_marking_threads() <= _max_task_num, "sanity");
+    for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
+      CMTask* task = _tasks[i];
+      HeapWord* task_finger = task->finger();
+      if (task_finger != NULL && task_finger < _heap_end) {
+        // See above note on the global finger verification.
+        HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
+        guarantee(task_finger == task_hr->bottom() ||
                  !task_hr->in_collection_set(),
+                  err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
+                          task_finger, HR_FORMAT_PARAMS(task_hr)));
+      }
+    }
+  }
+}
+#endif // PRODUCT
 
 void ConcurrentMark::clear_marking_state(bool clear_overflow) {
   _markStack.setEmpty();
   _markStack.clear_overflow();
   _regionStack.setEmpty();
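
verify_no_cset_oops() replaces the old invalidation logic above: rather than patching region-stack entries and aborted regions that point into the collection set, the non-PRODUCT code now simply checks that no mark-stack entry, task-queue entry, SATB buffer entry, or finger points into it. A hedged sketch of how a pause might drive it; the call sites and flag combinations shown are assumptions, not the actual G1CollectedHeap code:

    // Hypothetical driver exercising the verification with different flags.
    void hypothetical_verify_during_pause(ConcurrentMark* cm) {
      // Full check, e.g. at the start of an evacuation pause.
      cm->verify_no_cset_oops(true  /* verify_stacks */,
                              true  /* verify_enqueued_buffers */,
                              true  /* verify_thread_buffers */,
                              true  /* verify_fingers */);

      // Narrower check, e.g. once per-thread SATB buffers have been flushed
      // and only the completed (enqueued) buffers remain interesting.
      cm->verify_no_cset_oops(false /* verify_stacks */,
                              true  /* verify_enqueued_buffers */,
                              false /* verify_thread_buffers */,
                              true  /* verify_fingers */);
    }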
@@ -3097,10 +3219,13 @@
     _g1h->collection_set_iterate_from(hr, &cmplt);
   }
 };
 
 void ConcurrentMark::complete_marking_in_collection_set() {
+  guarantee(false, "complete_marking_in_collection_set(): "
+            "don't call this any more");
+
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   if (!g1h->mark_in_progress()) {
     g1h->g1_policy()->record_mark_closure_time(0.0);
     return;
@@ -3144,10 +3269,12 @@
 // location. There is a tricky situation with the gray objects in
 // region stack that are being coped, however. See the comment in
 // newCSet().
 
 void ConcurrentMark::newCSet() {
+  guarantee(false, "newCSet(): don't call this any more");
+
   if (!concurrent_marking_in_progress()) {
     // nothing to do if marking is not in progress
     return;
   }
 
@@ -3182,10 +3309,12 @@
     _should_gray_objects = true;
   }
 }
 
 void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
+  guarantee(false, "registerCSetRegion(): don't call this any more");
+
   if (!concurrent_marking_in_progress()) return;
 
   HeapWord* region_end = hr->end();
   if (region_end > _min_finger) {
     _should_gray_objects = true;
@@ -3193,10 +3322,13 @@
 }
 
 // Resets the region fields of active CMTasks whose values point
 // into the collection set.
 void ConcurrentMark::reset_active_task_region_fields_in_cset() {
+  guarantee(false, "reset_active_task_region_fields_in_cset(): "
+            "don't call this any more");
+
   assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
   assert(parallel_marking_threads() <= _max_task_num, "sanity");
 
   for (int i = 0; i < (int)parallel_marking_threads(); i += 1) {
     CMTask* task = _tasks[i];
@@ -3903,10 +4035,14 @@
   // limits to get the regular clock call early
   decrease_limits();
 }
 
 void CMTask::drain_region_stack(BitMapClosure* bc) {
+  assert(_cm->region_stack_empty(), "region stack should be empty");
+  assert(_aborted_region.is_empty(), "aborted region should be empty");
+  return;
+
   if (has_aborted()) return;
 
   assert(_region_finger == NULL,
          "it should be NULL when we're not scanning a region");
 