# HG changeset patch
# User ysr
# Date 1265074141 28800
# Node ID 6484c4ee11cbb7b3216c731a5fe3dbe94eba0530
# Parent 745c853ee57f03ab198cd52ee790485938a5ee51
6904516: More object array barrier fixes, following up on 6906727
Summary: Fixed missing pre-barrier calls for G1, modified C1 to call pre- and correct post-barrier interfaces, deleted obsolete interface, (temporarily) disabled redundant deferred barrier in BacktraceBuilder.
Reviewed-by: coleenp, jmasa, kvn, never

diff -r 745c853ee57f -r 6484c4ee11cb src/share/vm/c1/c1_Runtime1.cpp
--- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Jan 29 14:51:38 2010 -0800
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Mon Feb 01 17:29:01 2010 -0800
@@ -1075,6 +1075,7 @@
 
 };
 
+// Below length is the # elements copied.
 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                           oopDesc* dst, T* dst_addr,
                                           int length) {
@@ -1083,22 +1084,22 @@
   // barrier. The assert will fail if this is not the case.
   // Note that we use the non-virtual inlineable variant of write_ref_array.
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(),
-         "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (src == dst) {
     // same object, no check
+    bs->write_ref_array_pre(dst_addr, length);
     Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-    bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                  (HeapWord*)(dst_addr + length)));
+    bs->write_ref_array((HeapWord*)dst_addr, length);
     return ac_ok;
   } else {
     klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
     klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
     if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
       // Elements are guaranteed to be subtypes, so no check necessary
+      bs->write_ref_array_pre(dst_addr, length);
       Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-      bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                    (HeapWord*)(dst_addr + length)));
+      bs->write_ref_array((HeapWord*)dst_addr, length);
       return ac_ok;
     }
   }
@@ -1162,9 +1163,16 @@
 #endif
 
   if (num == 0) return;
 
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
+  if (UseCompressedOops) {
+    bs->write_ref_array_pre((narrowOop*)dst, num);
+  } else {
+    bs->write_ref_array_pre((oop*)dst, num);
+  }
   Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  bs->write_ref_array(MemRegion(dst, dst + num));
+  bs->write_ref_array(dst, num);
 JRT_END
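
Both arraycopy paths fixed above follow the same pattern: write_ref_array_pre runs before Copy::conjoint_oops_atomic and write_ref_array after it. The standalone sketch below is a toy model of that ordering, not HotSpot code; the oop typedef, the queue, and the barrier functions are local stand-ins that only borrow the interface names from the patch. It shows why a snapshot-at-the-beginning collector such as G1 needs the pre-barrier to run first: the old referents have to be recorded before the copy overwrites them.

    #include <cstddef>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    typedef const char* oop;  // toy stand-in for a heap reference

    static std::vector<oop> satb_queue;  // models G1's SATB marking queue

    // Models write_ref_array_pre: enqueue the referents that are about to
    // be overwritten so concurrent marking can still trace them.
    static void write_ref_array_pre(oop* dst, int length) {
      for (int i = 0; i < length; i++) {
        if (dst[i] != NULL) satb_queue.push_back(dst[i]);
      }
    }

    // Models write_ref_array: a post-store card mark over the written slots.
    static void write_ref_array(oop* dst, size_t count) {
      printf("post-barrier: marked %zu slots at %p\n", count, (void*)dst);
    }

    // Models the fixed obj_arraycopy_work: pre-barrier, copy, post-barrier.
    static void obj_arraycopy(oop* src, oop* dst, int length) {
      write_ref_array_pre(dst, length);         // old values still readable
      memmove(dst, src, length * sizeof(oop));  // the copy proper
      write_ref_array(dst, (size_t)length);     // mark only after the stores
    }

    int main() {
      oop src[2] = { "new_a", "new_b" };
      oop dst[2] = { "old_a", "old_b" };
      obj_arraycopy(src, dst, 2);
      // Prints old_a and old_b, the snapshot the collector needs; a
      // pre-barrier issued after the copy would record new_a and new_b.
      for (size_t i = 0; i < satb_queue.size(); i++) {
        printf("SATB queue: %s\n", satb_queue[i]);
      }
      return 0;
    }
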
diff -r 745c853ee57f -r 6484c4ee11cb src/share/vm/classfile/javaClasses.cpp
--- a/src/share/vm/classfile/javaClasses.cpp	Fri Jan 29 14:51:38 2010 -0800
+++ b/src/share/vm/classfile/javaClasses.cpp	Mon Feb 01 17:29:01 2010 -0800
@@ -1121,10 +1121,23 @@
   }
 
   void flush() {
+    // The following appears to have been an optimization that avoided
+    // a barrier for each individual store into the _methods array by
+    // instead doing a single barrier for the entire array after the
+    // series of writes. That optimization seems to have been lost when
+    // compressed oops was implemented. However, the extra card-mark
+    // below was left in place, and it is now redundant because the
+    // individual stores into the _methods array already execute the
+    // barrier code. CR 6918185 has been filed so the original code may
+    // be restored by deferring the barriers until after the entire
+    // sequence of stores, thus re-enabling the intent of the original
+    // optimization. In the meantime the redundant card mark below is disabled.
     if (_dirty && _methods != NULL) {
+#if 0
       BarrierSet* bs = Universe::heap()->barrier_set();
       assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
       bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
+#endif
       _dirty = false;
     }
   }
@@ -1168,9 +1181,7 @@
       method = mhandle();
     }
 
-    _methods->obj_at_put(_index, method);
-    // bad for UseCompressedOops
-    // *_methods->obj_at_addr(_index) = method;
+    _methods->obj_at_put(_index, method);
     _bcis->ushort_at_put(_index, bci);
     _index++;
     _dirty = true;
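
The BacktraceBuilder change above is about redundancy rather than correctness: obj_at_put() already executes a barrier as part of each store, so the bulk write_ref_array() in flush() covered the same cards a second time. The toy model below illustrates the duplication the #if 0 removes; the card geometry (kSlotsPerCard, kCards) is made up for the example, and it counts marks where the real card table performs an idempotent dirty-byte store, so the duplication costs work rather than correctness.

    #include <cstdio>

    enum { kSlotsPerCard = 2, kCards = 4, kSlots = kSlotsPerCard * kCards };

    static int methods[kSlots];     // stands in for the _methods array
    static int card_marks[kCards];  // counts marks; real tables set a byte

    // Barriered store in the style of obj_at_put(): the card covering the
    // slot is marked as part of every store.
    static void obj_at_put(int index, int value) {
      methods[index] = value;
      card_marks[index / kSlotsPerCard]++;
    }

    // Deferred bulk barrier in the style of the now-disabled flush() code:
    // mark every card overlapping the array in one pass.
    static void write_ref_array(int length) {
      for (int i = 0; i < length; i += kSlotsPerCard) {
        card_marks[i / kSlotsPerCard]++;
      }
    }

    int main() {
      for (int i = 0; i < kSlots; i++) {
        obj_at_put(i, i);        // every card is already marked here...
      }
      write_ref_array(kSlots);   // ...so this pass is pure redundancy
      for (int i = 0; i < kCards; i++) {
        printf("card %d marked %d times\n", i, card_marks[i]);
      }
      return 0;
    }
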
diff -r 745c853ee57f -r 6484c4ee11cb src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jan 29 14:51:38 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Feb 01 17:29:01 2010 -0800
@@ -2505,6 +2505,7 @@
 }
 
 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
+  // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Call allocation profiler
   AllocationProfiler::iterate_since_last_gc();
@@ -2518,6 +2519,7 @@
   // is set.
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                            "derived pointer present"));
+  // always_do_update_barrier = true;
 }
 
 void G1CollectedHeap::do_collection_pause() {
diff -r 745c853ee57f -r 6484c4ee11cb src/share/vm/memory/barrierSet.hpp
--- a/src/share/vm/memory/barrierSet.hpp	Fri Jan 29 14:51:38 2010 -0800
+++ b/src/share/vm/memory/barrierSet.hpp	Mon Feb 01 17:29:01 2010 -0800
@@ -124,8 +124,6 @@
   // Below length is the # array elements being written
   virtual void write_ref_array_pre(      oop* dst, int length) {}
   virtual void write_ref_array_pre(narrowOop* dst, int length) {}
-  // Below MemRegion mr is expected to be HeapWord-aligned
-  inline void write_ref_array(MemRegion mr);
   // Below count is the # array elements being written, starting
   // at the address "start", which may not necessarily be HeapWord-aligned
   inline void write_ref_array(HeapWord* start, size_t count);
diff -r 745c853ee57f -r 6484c4ee11cb src/share/vm/memory/barrierSet.inline.hpp
--- a/src/share/vm/memory/barrierSet.inline.hpp	Fri Jan 29 14:51:38 2010 -0800
+++ b/src/share/vm/memory/barrierSet.inline.hpp	Mon Feb 01 17:29:01 2010 -0800
@@ -42,16 +42,6 @@
   }
 }
 
-void BarrierSet::write_ref_array(MemRegion mr) {
-  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start() , "Unaligned start");
-  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
-  if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
-  } else {
-    write_ref_array_work(mr);
-  }
-}
-
 // count is number of array elements being written
 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   assert(count <= (size_t)max_intx, "count too large");
@@ -61,12 +51,12 @@
   // strictly necessary for current uses, but a case of good hygiene and,
   // if you will, aesthetics) and the second upward (this is essential for
   // current uses) to a HeapWord boundary, so we mark all cards overlapping
-  // this write. In the event that this evolves in the future to calling a
+  // this write. If this evolves in the future to calling a
   // logging barrier of narrow oop granularity, like the pre-barrier for G1
   // (mentioned here merely by way of example), we will need to change this
-  // interface, much like the pre-barrier one above, so it is "exactly precise"
-  // (if i may be allowed the adverbial redundancy for emphasis) and does not
-  // include narrow oop slots not included in the original write interval.
+  // interface, so it is "exactly precise" (if I may be allowed the adverbial
+  // redundancy for emphasis) and does not include narrow oop slots not
+  // included in the original write interval.
   HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
   HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
   // If compressed oops were not being used, these should already be aligned
diff -r 745c853ee57f -r 6484c4ee11cb src/share/vm/runtime/stubRoutines.cpp
--- a/src/share/vm/runtime/stubRoutines.cpp	Fri Jan 29 14:51:38 2010 -0800
+++ b/src/share/vm/runtime/stubRoutines.cpp	Mon Feb 01 17:29:01 2010 -0800
@@ -196,11 +196,19 @@
 // Default versions of arraycopy functions
 //
 
+static void gen_arraycopy_barrier_pre(oop* dest, size_t count) {
+  assert(count != 0, "count should be non-zero");
+  assert(count <= (size_t)max_intx, "count too large");
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt");
+  bs->write_ref_array_pre(dest, (int)count);
+}
+
 static void gen_arraycopy_barrier(oop* dest, size_t count) {
   assert(count != 0, "count should be non-zero");
   BarrierSet* bs = Universe::heap()->barrier_set();
   assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-  bs->write_ref_array(MemRegion((HeapWord*)dest, (HeapWord*)(dest + count)));
+  bs->write_ref_array((HeapWord*)dest, count);
 }
 
 JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
@@ -240,6 +248,7 @@
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre(dest, count);
   Copy::conjoint_oops_atomic(src, dest, count);
   gen_arraycopy_barrier(dest, count);
 JRT_END
@@ -281,6 +290,7 @@
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre((oop *) dest, count);
   Copy::arrayof_conjoint_oops(src, dest, count);
   gen_arraycopy_barrier((oop *) dest, count);
 JRT_END
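
The interface change that runs through the whole patch is visible above: the MemRegion overload of write_ref_array asserted HeapWord-aligned bounds, which a compressed-oop (narrowOop) element range cannot guarantee, while the surviving (start, count) overload aligns conservatively inside the barrier. The sketch below models just that alignment step, with local stand-ins for HeapWordSize and the align helpers; the real code derives the end address from the element count and element size rather than taking a raw end pointer as this example does.

    #include <cstdint>
    #include <cstdio>

    static const uintptr_t HeapWordSize  = 8;  // 64-bit heap word
    static const uintptr_t narrowOopSize = 4;  // compressed reference width

    // Local models of the align_size_down/align_size_up helpers.
    static uintptr_t align_size_down(uintptr_t p, uintptr_t a) {
      return p & ~(a - 1);
    }
    static uintptr_t align_size_up(uintptr_t p, uintptr_t a) {
      return (p + a - 1) & ~(a - 1);
    }

    int main() {
      // Three narrowOop elements written starting mid-word: exactly the
      // case the removed MemRegion overload's asserts could not accept.
      uintptr_t start = 0x1004;                     // not HeapWord-aligned
      uintptr_t end   = start + 3 * narrowOopSize;  // 0x1010

      // Align the start down and the end up, so every card overlapping
      // the write is marked, at worst also covering a neighboring slot.
      uintptr_t aligned_start = align_size_down(start, HeapWordSize);
      uintptr_t aligned_end   = align_size_up(end, HeapWordSize);

      printf("write  [%#lx, %#lx)\n", (unsigned long)start,
             (unsigned long)end);
      printf("marked [%#lx, %#lx)\n", (unsigned long)aligned_start,
             (unsigned long)aligned_end);
      return 0;
    }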