comparison src/share/vm/memory/barrierSet.inline.hpp @ 1245:6484c4ee11cb

6904516: More object array barrier fixes, following up on 6906727
Summary: Fixed missing pre-barrier calls for G1, modified C1 to call pre- and correct post-barrier interfaces, deleted obsolete interface, (temporarily) disabled redundant deferred barrier in BacktraceBuilder
Reviewed-by: coleenp, jmasa, kvn, never
author ysr
date Mon, 01 Feb 2010 17:29:01 -0800
parents 6aa7255741f3
children c18cbe5936b8
comparing 1244:745c853ee57f with 1245:6484c4ee11cb
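
The summary above calls out missing G1 pre-barrier calls and incorrect post-barrier use on object-array writes. As a rough, self-contained sketch of the ordering involved (not HotSpot code: the int elements, the ToySATBHeap type, and its vectors are stand-ins for oops, the SATB mark queue, and the card table), the pre-barrier must log each value being overwritten before the store, and the post-barrier must cover the written range afterwards:

// Rough illustration only: ints stand in for oops, vectors stand in for
// the SATB queue and the card table. Not HotSpot code.
#include <cstdio>
#include <vector>

struct ToySATBHeap {
  std::vector<int>    satb_queue;   // "old values" logged by the pre-barrier
  std::vector<size_t> dirty_slots;  // slots covered by the post-barrier

  void oop_array_store(int* dst, const int* src, size_t count) {
    for (size_t i = 0; i < count; i++) {
      if (dst[i] != 0) {
        satb_queue.push_back(dst[i]);  // pre-barrier: log value being overwritten
      }
      dst[i] = src[i];                 // the actual element store
    }
    for (size_t i = 0; i < count; i++) {
      dirty_slots.push_back(i);        // post-barrier: cover the written range
    }
  }
};

int main() {
  int dst[3] = {10, 20, 30};
  int src[3] = {40, 50, 60};
  ToySATBHeap heap;
  heap.oop_array_store(dst, src, 3);
  std::printf("pre-barrier logged %zu old values, post-barrier covered %zu slots\n",
              heap.satb_queue.size(), heap.dirty_slots.size());
  return 0;
}

The real barriers operate on oop/narrowOop slots and card addresses; the point here is only the order of operations relative to the store.
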
@@ -40,35 +40,25 @@
   } else {
     write_ref_field_work(field, new_val);
   }
 }
 
-void BarrierSet::write_ref_array(MemRegion mr) {
-  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start() , "Unaligned start");
-  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
-  if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
-  } else {
-    write_ref_array_work(mr);
-  }
-}
-
 // count is number of array elements being written
 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   assert(count <= (size_t)max_intx, "count too large");
   HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
   // In the case of compressed oops, start and end may potentially be misaligned;
   // so we need to conservatively align the first downward (this is not
   // strictly necessary for current uses, but a case of good hygiene and,
   // if you will, aesthetics) and the second upward (this is essential for
   // current uses) to a HeapWord boundary, so we mark all cards overlapping
-  // this write. In the event that this evolves in the future to calling a
+  // this write. If this evolves in the future to calling a
   // logging barrier of narrow oop granularity, like the pre-barrier for G1
   // (mentioned here merely by way of example), we will need to change this
-  // interface, much like the pre-barrier one above, so it is "exactly precise"
-  // (if i may be allowed the adverbial redundancy for emphasis) and does not
-  // include narrow oop slots not included in the original write interval.
+  // interface, so it is "exactly precise" (if i may be allowed the adverbial
+  // redundancy for emphasis) and does not include narrow oop slots not
+  // included in the original write interval.
   HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
   HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
   // If compressed oops were not being used, these should already be aligned
   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
          "Expected heap word alignment of start and end");