Mercurial > hg > graal-jvmci-8
comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 4783:023652e49ac0
7121496: G1: do the per-region evacuation failure handling work in parallel
Summary: Parallelize the removal of self forwarding pointers etc. by wrapping in a HeapRegion closure, which is then wrapped inside an AbstractGangTask.
Reviewed-by: tonyp, iveresov
author | johnc |
---|---|
date | Fri, 23 Dec 2011 11:14:18 -0800 |
parents | bacb651cf5bf |
children | 02838862dec8 |
comparison
equal
deleted
inserted
replaced
4782:5fd354a959c5 | 4783:023652e49ac0 |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" | 30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" |
31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" | 31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" |
32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" | 32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | 33 #include "gc_implementation/g1/g1CollectorPolicy.hpp" |
34 #include "gc_implementation/g1/g1ErgoVerbose.hpp" | 34 #include "gc_implementation/g1/g1ErgoVerbose.hpp" |
35 #include "gc_implementation/g1/g1EvacFailure.hpp" | |
35 #include "gc_implementation/g1/g1MarkSweep.hpp" | 36 #include "gc_implementation/g1/g1MarkSweep.hpp" |
36 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | 37 #include "gc_implementation/g1/g1OopClosures.inline.hpp" |
37 #include "gc_implementation/g1/g1RemSet.inline.hpp" | 38 #include "gc_implementation/g1/g1RemSet.inline.hpp" |
38 #include "gc_implementation/g1/heapRegionRemSet.hpp" | 39 #include "gc_implementation/g1/heapRegionRemSet.hpp" |
39 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | 40 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" |
2616 r->set_claim_value(HeapRegion::InitialClaimValue); | 2617 r->set_claim_value(HeapRegion::InitialClaimValue); |
2617 return false; | 2618 return false; |
2618 } | 2619 } |
2619 }; | 2620 }; |
2620 | 2621 |
2621 void | 2622 void G1CollectedHeap::reset_heap_region_claim_values() { |
2622 G1CollectedHeap::reset_heap_region_claim_values() { | |
2623 ResetClaimValuesClosure blk; | 2623 ResetClaimValuesClosure blk; |
2624 heap_region_iterate(&blk); | 2624 heap_region_iterate(&blk); |
2625 } | |
2626 | |
2627 void G1CollectedHeap::reset_cset_heap_region_claim_values() { | |
2628 ResetClaimValuesClosure blk; | |
2629 collection_set_iterate(&blk); | |
2625 } | 2630 } |
2626 | 2631 |
2627 #ifdef ASSERT | 2632 #ifdef ASSERT |
2628 // This checks whether all regions in the heap have the correct claim | 2633 // This checks whether all regions in the heap have the correct claim |
2629 // value. I also piggy-backed on this a check to ensure that the | 2634 // value. I also piggy-backed on this a check to ensure that the |
3996 assert(!_drain_in_progress, "Postcondition"); | 4001 assert(!_drain_in_progress, "Postcondition"); |
3997 delete _evac_failure_scan_stack; | 4002 delete _evac_failure_scan_stack; |
3998 _evac_failure_scan_stack = NULL; | 4003 _evac_failure_scan_stack = NULL; |
3999 } | 4004 } |
4000 | 4005 |
4001 class UpdateRSetDeferred : public OopsInHeapRegionClosure { | |
4002 private: | |
4003 G1CollectedHeap* _g1; | |
4004 DirtyCardQueue *_dcq; | |
4005 CardTableModRefBS* _ct_bs; | |
4006 | |
4007 public: | |
4008 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : | |
4009 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} | |
4010 | |
4011 virtual void do_oop(narrowOop* p) { do_oop_work(p); } | |
4012 virtual void do_oop( oop* p) { do_oop_work(p); } | |
4013 template <class T> void do_oop_work(T* p) { | |
4014 assert(_from->is_in_reserved(p), "paranoia"); | |
4015 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && | |
4016 !_from->is_survivor()) { | |
4017 size_t card_index = _ct_bs->index_for(p); | |
4018 if (_ct_bs->mark_card_deferred(card_index)) { | |
4019 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); | |
4020 } | |
4021 } | |
4022 } | |
4023 }; | |
4024 | |
4025 class RemoveSelfPointerClosure: public ObjectClosure { | |
4026 private: | |
4027 G1CollectedHeap* _g1; | |
4028 ConcurrentMark* _cm; | |
4029 HeapRegion* _hr; | |
4030 size_t _prev_marked_bytes; | |
4031 size_t _next_marked_bytes; | |
4032 OopsInHeapRegionClosure *_cl; | |
4033 public: | |
4034 RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr, | |
4035 OopsInHeapRegionClosure* cl) : | |
4036 _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), | |
4037 _next_marked_bytes(0), _cl(cl) {} | |
4038 | |
4039 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
4040 size_t next_marked_bytes() { return _next_marked_bytes; } | |
4041 | |
4042 // <original comment> | |
4043 // The original idea here was to coalesce evacuated and dead objects. | |
4044 // However that caused complications with the block offset table (BOT). | |
4045 // In particular if there were two TLABs, one of them partially refined. | |
4046 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| | |
4047 // The BOT entries of the unrefined part of TLAB_2 point to the start | |
4048 // of TLAB_2. If the last object of the TLAB_1 and the first object | |
4049 // of TLAB_2 are coalesced, then the cards of the unrefined part | |
4050 // would point into middle of the filler object. | |
4051 // The current approach is to not coalesce and leave the BOT contents intact. | |
4052 // </original comment> | |
4053 // | |
4054 // We now reset the BOT when we start the object iteration over the | |
4055 // region and refine its entries for every object we come across. So | |
4056 // the above comment is not really relevant and we should be able | |
4057 // to coalesce dead objects if we want to. | |
4058 void do_object(oop obj) { | |
4059 HeapWord* obj_addr = (HeapWord*) obj; | |
4060 assert(_hr->is_in(obj_addr), "sanity"); | |
4061 size_t obj_size = obj->size(); | |
4062 _hr->update_bot_for_object(obj_addr, obj_size); | |
4063 if (obj->is_forwarded() && obj->forwardee() == obj) { | |
4064 // The object failed to move. | |
4065 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); | |
4066 _cm->markPrev(obj); | |
4067 assert(_cm->isPrevMarked(obj), "Should be marked!"); | |
4068 _prev_marked_bytes += (obj_size * HeapWordSize); | |
4069 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { | |
4070 _cm->markAndGrayObjectIfNecessary(obj); | |
4071 } | |
4072 obj->set_mark(markOopDesc::prototype()); | |
4073 // While we were processing RSet buffers during the | |
4074 // collection, we actually didn't scan any cards on the | |
4075 // collection set, since we didn't want to update remembered | 
4076 // sets with entries that point into the collection set, given | |
4077 // that live objects from the collection set are about to move | 
4078 // and such entries will be stale very soon. This change also | |
4079 // dealt with a reliability issue which involved scanning a | |
4080 // card in the collection set and coming across an array that | |
4081 // was being chunked and looking malformed. The problem is | |
4082 // that, if evacuation fails, we might have remembered set | |
4083 // entries missing given that we skipped cards on the | |
4084 // collection set. So, we'll recreate such entries now. | |
4085 obj->oop_iterate(_cl); | |
4086 assert(_cm->isPrevMarked(obj), "Should be marked!"); | |
4087 } else { | |
4088 // The object has been either evacuated or is dead. Fill it with a | |
4089 // dummy object. | |
4090 MemRegion mr((HeapWord*)obj, obj_size); | |
4091 CollectedHeap::fill_with_object(mr); | |
4092 _cm->clearRangeBothMaps(mr); | |
4093 } | |
4094 } | |
4095 }; | |
4096 | |
4097 void G1CollectedHeap::remove_self_forwarding_pointers() { | 4006 void G1CollectedHeap::remove_self_forwarding_pointers() { |
4098 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); | 4007 assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity"); |
4099 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); | 4008 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); |
4100 UpdateRSetDeferred deferred_update(_g1h, &dcq); | 4009 |
4101 OopsInHeapRegionClosure *cl; | 4010 G1ParRemoveSelfForwardPtrsTask rsfp_task(this); |
4102 if (G1DeferredRSUpdate) { | 4011 |
4103 cl = &deferred_update; | 4012 if (G1CollectedHeap::use_parallel_gc_threads()) { |
4013 set_par_threads(); | |
4014 workers()->run_task(&rsfp_task); | |
4015 set_par_threads(0); | |
4104 } else { | 4016 } else { |
4105 cl = &immediate_update; | 4017 rsfp_task.work(0); |
4106 } | 4018 } |
4107 HeapRegion* cur = g1_policy()->collection_set(); | 4019 |
4108 while (cur != NULL) { | 4020 assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity"); |
4109 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | 4021 |
4110 assert(!cur->isHumongous(), "sanity"); | 4022 // Reset the claim values in the regions in the collection set. |
4111 | 4023 reset_cset_heap_region_claim_values(); |
4112 if (cur->evacuation_failed()) { | 4024 |
4113 assert(cur->in_collection_set(), "bad CS"); | 4025 assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity"); |
4114 RemoveSelfPointerClosure rspc(_g1h, cur, cl); | |
4115 | |
4116 // In the common case we make sure that this is done when the | |
4117 // region is freed so that it is "ready-to-go" when it's | |
4118 // re-allocated. However, when evacuation failure happens, a | |
4119 // region will remain in the heap and might ultimately be added | |
4120 // to a CSet in the future. So we have to be careful here and | |
4121 // make sure the region's RSet is ready for parallel iteration | |
4122 // whenever this might be required in the future. | |
4123 cur->rem_set()->reset_for_par_iteration(); | |
4124 cur->reset_bot(); | |
4125 cl->set_region(cur); | |
4126 cur->object_iterate(&rspc); | |
4127 | |
4128 // A number of manipulations to make the TAMS be the current top, | |
4129 // and the marked bytes be the ones observed in the iteration. | |
4130 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
4131 // The comments below are the postconditions achieved by the | |
4132 // calls. Note especially the last such condition, which says that | |
4133 // the count of marked bytes has been properly restored. | |
4134 cur->note_start_of_marking(false); | |
4135 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
4136 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
4137 // _next_marked_bytes == prev_marked_bytes. | |
4138 cur->note_end_of_marking(); | |
4139 // _prev_top_at_mark_start == top(), | |
4140 // _prev_marked_bytes == prev_marked_bytes | |
4141 } | |
4142 // If there is no mark in progress, we modified the _next variables | |
4143 // above needlessly, but harmlessly. | |
4144 if (_g1h->mark_in_progress()) { | |
4145 cur->note_start_of_marking(false); | |
4146 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
4147 // _next_marked_bytes == next_marked_bytes. | |
4148 } | |
4149 } | |
4150 cur = cur->next_in_collection_set(); | |
4151 } | |
4152 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | 4026 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); |
4153 | 4027 |
4154 // Now restore saved marks, if any. | 4028 // Now restore saved marks, if any. |
4155 if (_objs_with_preserved_marks != NULL) { | 4029 if (_objs_with_preserved_marks != NULL) { |
4156 assert(_preserved_marks_of_objs != NULL, "Both or none."); | 4030 assert(_preserved_marks_of_objs != NULL, "Both or none."); |
4159 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | 4033 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { |
4160 oop obj = _objs_with_preserved_marks->at(i); | 4034 oop obj = _objs_with_preserved_marks->at(i); |
4161 markOop m = _preserved_marks_of_objs->at(i); | 4035 markOop m = _preserved_marks_of_objs->at(i); |
4162 obj->set_mark(m); | 4036 obj->set_mark(m); |
4163 } | 4037 } |
4038 | |
4164 // Delete the preserved marks growable arrays (allocated on the C heap). | 4039 // Delete the preserved marks growable arrays (allocated on the C heap). |
4165 delete _objs_with_preserved_marks; | 4040 delete _objs_with_preserved_marks; |
4166 delete _preserved_marks_of_objs; | 4041 delete _preserved_marks_of_objs; |
4167 _objs_with_preserved_marks = NULL; | 4042 _objs_with_preserved_marks = NULL; |
4168 _preserved_marks_of_objs = NULL; | 4043 _preserved_marks_of_objs = NULL; |