Mercurial > hg > graal-jvmci-8
comparison src/share/vm/oops/objArrayKlass.cpp @ 362:f8199438385b
Merge
author | apetrusenko |
---|---|
date | Wed, 17 Sep 2008 16:49:18 +0400 |
parents | 1ee8caae33af |
children | 443791f333a2 |
comparison
equal
deleted
inserted
replaced
316:5fa96a5a7e76 | 362:f8199438385b |
---|---|
84 template <class T> void objArrayKlass::do_copy(arrayOop s, T* src, | 84 template <class T> void objArrayKlass::do_copy(arrayOop s, T* src, |
85 arrayOop d, T* dst, int length, TRAPS) { | 85 arrayOop d, T* dst, int length, TRAPS) { |
86 | 86 |
87 const size_t word_len = objArrayOopDesc::array_size(length); | 87 const size_t word_len = objArrayOopDesc::array_size(length); |
88 | 88 |
89 // For performance reasons, we assume we are using a card marking write | |
90 // barrier. The assert will fail if this is not the case. | |
91 BarrierSet* bs = Universe::heap()->barrier_set(); | 89 BarrierSet* bs = Universe::heap()->barrier_set(); |
90 // For performance reasons, we assume that the write barrier we |
91 // are using has optimized modes for arrays of references. At least one |
92 // of the asserts below will fail if this is not the case. |
92 assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); | 93 assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); |
93 | 94 assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well."); |
95 | |
96 MemRegion dst_mr = MemRegion((HeapWord*)dst, word_len); | |
94 if (s == d) { | 97 if (s == d) { |
95 // since source and destination are equal we do not need conversion checks. | 98 // since source and destination are equal we do not need conversion checks. |
96 assert(length > 0, "sanity check"); | 99 assert(length > 0, "sanity check"); |
100 bs->write_ref_array_pre(dst_mr); | |
97 Copy::conjoint_oops_atomic(src, dst, length); | 101 Copy::conjoint_oops_atomic(src, dst, length); |
98 } else { | 102 } else { |
99 // We have to make sure all elements conform to the destination array | 103 // We have to make sure all elements conform to the destination array |
100 klassOop bound = objArrayKlass::cast(d->klass())->element_klass(); | 104 klassOop bound = objArrayKlass::cast(d->klass())->element_klass(); |
101 klassOop stype = objArrayKlass::cast(s->klass())->element_klass(); | 105 klassOop stype = objArrayKlass::cast(s->klass())->element_klass(); |
102 if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) { | 106 if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) { |
103 // elements are guaranteed to be subtypes, so no check necessary | 107 // elements are guaranteed to be subtypes, so no check necessary |
108 bs->write_ref_array_pre(dst_mr); | |
104 Copy::conjoint_oops_atomic(src, dst, length); | 109 Copy::conjoint_oops_atomic(src, dst, length); |
105 } else { | 110 } else { |
106 // slow case: need individual subtype checks | 111 // slow case: need individual subtype checks |
107 // note: don't use obj_at_put below because it includes a redundant store check | 112 // note: don't use obj_at_put below because it includes a redundant store check |
108 T* from = src; | 113 T* from = src; |
109 T* end = from + length; | 114 T* end = from + length; |
110 for (T* p = dst; from < end; from++, p++) { | 115 for (T* p = dst; from < end; from++, p++) { |
111 // XXX this is going to be slow. | 116 // XXX this is going to be slow. |
112 T element = *from; | 117 T element = *from; |
113 if (oopDesc::is_null(element) || | 118 // even slower now |
114 Klass::cast(oopDesc::decode_heap_oop_not_null(element)->klass())->is_subtype_of(bound)) { | 119 bool element_is_null = oopDesc::is_null(element); |
120 oop new_val = element_is_null ? oop(NULL) | |
121 : oopDesc::decode_heap_oop_not_null(element); | |
122 if (element_is_null || | |
123 Klass::cast((new_val->klass()))->is_subtype_of(bound)) { | |
124 bs->write_ref_field_pre(p, new_val); | |
115 *p = *from; | 125 *p = *from; |
116 } else { | 126 } else { |
117 // We must do a barrier to cover the partial copy. | 127 // We must do a barrier to cover the partial copy. |
118 const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize); | 128 const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize); |
119 // pointer delta is scaled to number of elements (length field in | 129 // pointer delta is scaled to number of elements (length field in |
399 } \ | 409 } \ |
400 return size; \ | 410 return size; \ |
401 } | 411 } |
402 | 412 |
403 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) | 413 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) |
404 ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) | 414 ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) |
405 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) | 415 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) |
406 ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) | 416 ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) |
407 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r) | 417 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r) |
408 ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r) | 418 ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r) |
409 | 419 |
410 int objArrayKlass::oop_adjust_pointers(oop obj) { | 420 int objArrayKlass::oop_adjust_pointers(oop obj) { |
411 assert(obj->is_objArray(), "obj must be obj array"); | 421 assert(obj->is_objArray(), "obj must be obj array"); |
412 objArrayOop a = objArrayOop(obj); | 422 objArrayOop a = objArrayOop(obj); |
413 // Get size before changing pointers. | 423 // Get size before changing pointers. |