comparison src/share/vm/memory/barrierSet.inline.hpp @ 1091:6aa7255741f3

6906727: UseCompressedOops: some card-marking fixes related to object arrays
Summary: Introduced a new write_ref_array(HeapWord* start, size_t count) method that does the requisite MemRegion range calculation, so (some of the) clients of the erstwhile write_ref_array(MemRegion mr) do not need to worry about it. This removed all external uses of array_size(), which was also simplified and made private. Asserts were added to catch other possible issues. Further, less essential, fixes stemming from this investigation are deferred to CR 6904516 (to follow shortly in hs17).
Reviewed-by: kvn, coleenp, jmasa
author ysr
date Thu, 03 Dec 2009 15:01:57 -0800
parents df6caf649ff7
children 6484c4ee11cb
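For context before the diff: the point of the new count-based overload is that callers no longer build the covering MemRegion (and get its compressed-oop end alignment right) themselves. A minimal before/after sketch of a caller; the names bs, dst and count are illustrative, not identifiers from this changeset:

  // Hypothetical caller dirtying cards after storing "count" oops at "dst".
  // "bs", "dst" and "count" are illustrative names, not from this changeset.
  void post_barrier_after_oop_stores(BarrierSet* bs, HeapWord* dst, size_t count) {
    // Before: each caller computed the covering region itself, and with
    // UseCompressedOops the region's end could land mid-HeapWord:
    //   bs->write_ref_array(MemRegion(dst, /* word-aligned end */ ...));
    // After: the new overload performs the range calculation internally,
    // aligning start down and end up to HeapWord boundaries.
    bs->write_ref_array(dst, count);  // count = number of array elements
  }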
--- src/share/vm/memory/barrierSet.inline.hpp@1090:fa357420e7d2
+++ src/share/vm/memory/barrierSet.inline.hpp@1091:6aa7255741f3
@@ -41,16 +41,46 @@
     write_ref_field_work(field, new_val);
   }
 }
 
 void BarrierSet::write_ref_array(MemRegion mr) {
+  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   if (kind() == CardTableModRef) {
     ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
   } else {
     write_ref_array_work(mr);
   }
 }
+
+// count is number of array elements being written
+void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
+  assert(count <= (size_t)max_intx, "count too large");
+  HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
+  // In the case of compressed oops, start and end may potentially be misaligned;
+  // so we need to conservatively align the first downward (this is not
+  // strictly necessary for current uses, but a case of good hygiene and,
+  // if you will, aesthetics) and the second upward (this is essential for
+  // current uses) to a HeapWord boundary, so we mark all cards overlapping
+  // this write. In the event that this evolves in the future to calling a
+  // logging barrier of narrow oop granularity, like the pre-barrier for G1
+  // (mentioned here merely by way of example), we will need to change this
+  // interface, much like the pre-barrier one above, so it is "exactly precise"
+  // (if i may be allowed the adverbial redundancy for emphasis) and does not
+  // include narrow oop slots not included in the original write interval.
+  HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
+  HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
+  // If compressed oops were not being used, these should already be aligned
+  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
+         "Expected heap word alignment of start and end");
+#if 0
+  warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
+          start, count, aligned_start, aligned_end);
+#endif
+  write_ref_array_work(MemRegion(aligned_start, aligned_end));
+}
 
 void BarrierSet::write_region(MemRegion mr) {
   if (kind() == CardTableModRef) {
     ((CardTableModRefBS*)this)->inline_write_region(mr);
   } else {
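To see why the upward alignment of end is essential: with UseCompressedOops on a 64-bit VM, heapOopSize is 4 while HeapWordSize is 8, so an odd element count leaves end mid-word. A self-contained sketch of the same arithmetic, with align_size_down/align_size_up written out as plain masking (the addresses are made up for illustration):

  #include <stdint.h>
  #include <stdio.h>

  int main() {
    const uintptr_t HeapWordSize = 8;  // 64-bit VM word
    const uintptr_t heapOopSize  = 4;  // narrow oop under UseCompressedOops
    uintptr_t start = 0x1000;          // word-aligned array base (hypothetical)
    size_t    count = 3;               // odd count => misaligned end
    uintptr_t end = start + count * heapOopSize;            // 0x100c, mid-word
    uintptr_t aligned_start = start & ~(HeapWordSize - 1);  // 0x1000, unchanged
    uintptr_t aligned_end   = (end + HeapWordSize - 1)
                              & ~(HeapWordSize - 1);        // rounds up to 0x1010
    // Without the round-up, the HeapWord [0x1008, 0x1010) holding the third
    // narrow oop would not be covered, and MemRegion's HeapWord-granular
    // arithmetic would see a misaligned end (the condition the new asserts
    // in the MemRegion overload now catch).
    printf("[%#lx, %#lx) -> [%#lx, %#lx)\n",
           (unsigned long)start, (unsigned long)end,
           (unsigned long)aligned_start, (unsigned long)aligned_end);
    return 0;
  }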