Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 3973:663cb89032b1
7092412: G1: Some roots not marked during an initial mark that gets an evacuation failure
Summary: As a result of the changes for 7080389, an evacuation failure during an initial mark pause may result in some root objects not being marked. Pass whether the caller is a root scanning closure into the evacuation failure handling code so that the thread that successfully forwards an object to itself also marks the object.
Reviewed-by: ysr, brutisso, tonyp
author: johnc
date: Tue, 20 Sep 2011 15:39:17 -0700
parents: af2ab04e0038
children: 4dfb2df418f2
comparison
equal
deleted
inserted
replaced
3972:4f93f0d00802 | 3973:663cb89032b1 |
---|---|
3944 } | 3944 } |
3945 } | 3945 } |
3946 | 3946 |
3947 oop | 3947 oop |
3948 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | 3948 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, |
3949 oop old) { | 3949 oop old, |
3950 bool should_mark_root) { | |
3950 assert(obj_in_cs(old), | 3951 assert(obj_in_cs(old), |
3951 err_msg("obj: "PTR_FORMAT" should still be in the CSet", | 3952 err_msg("obj: "PTR_FORMAT" should still be in the CSet", |
3952 (HeapWord*) old)); | 3953 (HeapWord*) old)); |
3953 markOop m = old->mark(); | 3954 markOop m = old->mark(); |
3954 oop forward_ptr = old->forward_to_atomic(old); | 3955 oop forward_ptr = old->forward_to_atomic(old); |
3955 if (forward_ptr == NULL) { | 3956 if (forward_ptr == NULL) { |
3956 // Forward-to-self succeeded. | 3957 // Forward-to-self succeeded. |
3958 | |
3959 // should_mark_root will be true when this routine is called | |
3960 // from a root scanning closure during an initial mark pause. | |
3961 // In this case the thread that succeeds in self-forwarding the | |
3962 // object is also responsible for marking the object. | |
3963 if (should_mark_root) { | |
3964 assert(!oopDesc::is_null(old), "shouldn't be"); | |
3965 _cm->grayRoot(old); | |
3966 } | |
3967 | |
3957 if (_evac_failure_closure != cl) { | 3968 if (_evac_failure_closure != cl) { |
3958 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | 3969 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); |
3959 assert(!_drain_in_progress, | 3970 assert(!_drain_in_progress, |
3960 "Should only be true while someone holds the lock."); | 3971 "Should only be true while someone holds the lock."); |
3961 // Set the global evac-failure closure to the current thread's. | 3972 // Set the global evac-failure closure to the current thread's. |
4206 _cm->grayRoot(oop(addr)); | 4217 _cm->grayRoot(oop(addr)); |
4207 } | 4218 } |
4208 } | 4219 } |
4209 } | 4220 } |
4210 | 4221 |
4211 oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_copy) { | 4222 oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_root, |
4223 bool should_mark_copy) { | |
4212 size_t word_sz = old->size(); | 4224 size_t word_sz = old->size(); |
4213 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | 4225 HeapRegion* from_region = _g1->heap_region_containing_raw(old); |
4214 // +1 to make the -1 indexes valid... | 4226 // +1 to make the -1 indexes valid... |
4215 int young_index = from_region->young_index_in_cset()+1; | 4227 int young_index = from_region->young_index_in_cset()+1; |
4216 assert( (from_region->is_young() && young_index > 0) || | 4228 assert( (from_region->is_young() && young_index > 0) || |
4226 | 4238 |
4227 if (obj_ptr == NULL) { | 4239 if (obj_ptr == NULL) { |
4228 // This will either forward-to-self, or detect that someone else has | 4240 // This will either forward-to-self, or detect that someone else has |
4229 // installed a forwarding pointer. | 4241 // installed a forwarding pointer. |
4230 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | 4242 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); |
4231 return _g1->handle_evacuation_failure_par(cl, old); | 4243 return _g1->handle_evacuation_failure_par(cl, old, should_mark_root); |
4232 } | 4244 } |
4233 | 4245 |
4234 // We're going to allocate linearly, so might as well prefetch ahead. | 4246 // We're going to allocate linearly, so might as well prefetch ahead. |
4235 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | 4247 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); |
4236 | 4248 |
4328 // If we are a root scanning closure during an initial | 4340 // If we are a root scanning closure during an initial |
4329 // mark pause (i.e. do_mark_object will be true) then | 4341 // mark pause (i.e. do_mark_object will be true) then |
4330 // we also need to handle marking of roots in the | 4342 // we also need to handle marking of roots in the |
4331 // event of an evacuation failure. In the event of an | 4343 // event of an evacuation failure. In the event of an |
4332 // evacuation failure, the object is forwarded to itself | 4344 // evacuation failure, the object is forwarded to itself |
4333 // and not copied so let's mark it here. | 4345 // and not copied. For root-scanning closures, the |
4346 // object would be marked after a successful self-forward | |
4347 // but an object could be pointed to by both a root and non | |
4348 // root location and be self-forwarded by a non-root-scanning | |
4349 // closure. Therefore we also have to attempt to mark the | |
4350 // self-forwarded root object here. | |
4334 if (do_mark_object && obj->forwardee() == obj) { | 4351 if (do_mark_object && obj->forwardee() == obj) { |
4335 mark_object(p); | 4352 mark_object(p); |
4336 } | 4353 } |
4337 } else { | 4354 } else { |
4355 // During an initial mark pause, objects that are pointed to | |
4356 // by the roots need to be marked - even in the event of an | |
4357 // evacuation failure. We pass the template parameter | |
4358 // do_mark_object (which is true for root scanning closures | |
4359 // during an initial mark pause) to copy_to_survivor_space | |
4360 // which will pass it on to the evacuation failure handling | |
4361 // code. The thread that successfully self-forwards a root | |
4362 // object to itself is responsible for marking the object. | |
4363 bool should_mark_root = do_mark_object; | |
4364 | |
4338 // We need to mark the copied object if we're a root scanning | 4365 // We need to mark the copied object if we're a root scanning |
4339 // closure during an initial mark pause (i.e. do_mark_object | 4366 // closure during an initial mark pause (i.e. do_mark_object |
4340 // will be true), or the object is already marked and we need | 4367 // will be true), or the object is already marked and we need |
4341 // to propagate the mark to the evacuated copy. | 4368 // to propagate the mark to the evacuated copy. |
4342 bool should_mark_copy = do_mark_object || | 4369 bool should_mark_copy = do_mark_object || |
4343 _during_initial_mark || | 4370 _during_initial_mark || |
4344 (_mark_in_progress && !_g1->is_obj_ill(obj)); | 4371 (_mark_in_progress && !_g1->is_obj_ill(obj)); |
4345 | 4372 |
4346 oop copy_oop = copy_to_survivor_space(obj, should_mark_copy); | 4373 oop copy_oop = copy_to_survivor_space(obj, should_mark_root, |
4374 should_mark_copy); | |
4347 oopDesc::encode_store_heap_oop(p, copy_oop); | 4375 oopDesc::encode_store_heap_oop(p, copy_oop); |
4348 } | 4376 } |
4349 // When scanning the RS, we only care about objs in CS. | 4377 // When scanning the RS, we only care about objs in CS. |
4350 if (barrier == G1BarrierRS) { | 4378 if (barrier == G1BarrierRS) { |
4351 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); | 4379 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |