Mercurial > hg > truffle
comparison src/share/vm/oops/objArrayKlass.cpp @ 113:ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
author | coleenp |
---|---|
date | Sun, 13 Apr 2008 17:43:42 -0400 |
parents | a61af66fc99e |
children | d1605aabd0a1 37f87013dfd8 |
comparison
equal
deleted
inserted
replaced
110:a49a647afe9a | 113:ba764ed4b6f2 |
---|---|
78 } | 78 } |
79 } | 79 } |
80 return h_array(); | 80 return h_array(); |
81 } | 81 } |
82 | 82 |
83 void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d, | 83 // Either oop or narrowOop depending on UseCompressedOops. |
84 int dst_pos, int length, TRAPS) { | 84 template <class T> void objArrayKlass::do_copy(arrayOop s, T* src, |
85 assert(s->is_objArray(), "must be obj array"); | 85 arrayOop d, T* dst, int length, TRAPS) { |
86 | 86 |
87 if (!d->is_objArray()) { | 87 const size_t word_len = objArrayOopDesc::array_size(length); |
88 THROW(vmSymbols::java_lang_ArrayStoreException()); | |
89 } | |
90 | |
91 // Check if all offsets and lengths are non negative | |
92 if (src_pos < 0 || dst_pos < 0 || length < 0) { | |
93 THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); | |
94 } | |
95 // Check if the ranges are valid | |
96 if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) | |
97 || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) { | |
98 THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); | |
99 } | |
100 | |
101 // Special case. Boundary cases must be checked first | |
102 // This allows the following call: copy_array(s, s.length(), d.length(), 0). | |
103 // This is correct, since the position is supposed to be an 'in between point', i.e., s.length(), | |
104 // points to the right of the last element. | |
105 if (length==0) { | |
106 return; | |
107 } | |
108 | |
109 oop* const src = objArrayOop(s)->obj_at_addr(src_pos); | |
110 oop* const dst = objArrayOop(d)->obj_at_addr(dst_pos); | |
111 const size_t word_len = length * HeapWordsPerOop; | |
112 | 88 |
113 // For performance reasons, we assume we are using a card marking write | 89 // For performance reasons, we assume we are using a card marking write |
114 // barrier. The assert will fail if this is not the case. | 90 // barrier. The assert will fail if this is not the case. |
115 BarrierSet* bs = Universe::heap()->barrier_set(); | 91 BarrierSet* bs = Universe::heap()->barrier_set(); |
116 assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); | 92 assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); |
127 // elements are guaranteed to be subtypes, so no check necessary | 103 // elements are guaranteed to be subtypes, so no check necessary |
128 Copy::conjoint_oops_atomic(src, dst, length); | 104 Copy::conjoint_oops_atomic(src, dst, length); |
129 } else { | 105 } else { |
130 // slow case: need individual subtype checks | 106 // slow case: need individual subtype checks |
131 // note: don't use obj_at_put below because it includes a redundant store check | 107 // note: don't use obj_at_put below because it includes a redundant store check |
132 oop* from = src; | 108 T* from = src; |
133 oop* end = from + length; | 109 T* end = from + length; |
134 for (oop* p = dst; from < end; from++, p++) { | 110 for (T* p = dst; from < end; from++, p++) { |
135 oop element = *from; | 111 // XXX this is going to be slow. |
136 if (element == NULL || Klass::cast(element->klass())->is_subtype_of(bound)) { | 112 T element = *from; |
137 *p = element; | 113 if (oopDesc::is_null(element) || |
114 Klass::cast(oopDesc::decode_heap_oop_not_null(element)->klass())->is_subtype_of(bound)) { | |
115 *p = *from; | |
138 } else { | 116 } else { |
139 // We must do a barrier to cover the partial copy. | 117 // We must do a barrier to cover the partial copy. |
140 const size_t done_word_len = pointer_delta(p, dst, oopSize) * | 118 const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize); |
141 HeapWordsPerOop; | 119 // pointer delta is scaled to number of elements (length field in |
120 // objArrayOop) which we assume is 32 bit. | |
121 assert(pd == (size_t)(int)pd, "length field overflow"); | |
122 const size_t done_word_len = objArrayOopDesc::array_size((int)pd); | |
142 bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len)); | 123 bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len)); |
143 THROW(vmSymbols::java_lang_ArrayStoreException()); | 124 THROW(vmSymbols::java_lang_ArrayStoreException()); |
144 return; | 125 return; |
145 } | 126 } |
146 } | 127 } |
147 } | 128 } |
148 } | 129 } |
149 bs->write_ref_array(MemRegion((HeapWord*)dst, word_len)); | 130 bs->write_ref_array(MemRegion((HeapWord*)dst, word_len)); |
131 } | |
132 | |
133 void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d, | |
134 int dst_pos, int length, TRAPS) { | |
135 assert(s->is_objArray(), "must be obj array"); | |
136 | |
137 if (!d->is_objArray()) { | |
138 THROW(vmSymbols::java_lang_ArrayStoreException()); | |
139 } | |
140 | |
141 // Check if all offsets and lengths are non negative | |
142 if (src_pos < 0 || dst_pos < 0 || length < 0) { | |
143 THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); | |
144 } | |
145 // Check if the ranges are valid | |
146 if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) | |
147 || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) { | |
148 THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); | |
149 } | |
150 | |
151 // Special case. Boundary cases must be checked first | |
152 // This allows the following call: copy_array(s, s.length(), d.length(), 0). | |
153 // This is correct, since the position is supposed to be an 'in between point', i.e., s.length(), | |
154 // points to the right of the last element. | |
155 if (length==0) { | |
156 return; | |
157 } | |
158 if (UseCompressedOops) { | |
159 narrowOop* const src = objArrayOop(s)->obj_at_addr<narrowOop>(src_pos); | |
160 narrowOop* const dst = objArrayOop(d)->obj_at_addr<narrowOop>(dst_pos); | |
161 do_copy<narrowOop>(s, src, d, dst, length, CHECK); | |
162 } else { | |
163 oop* const src = objArrayOop(s)->obj_at_addr<oop>(src_pos); | |
164 oop* const dst = objArrayOop(d)->obj_at_addr<oop>(dst_pos); | |
165 do_copy<oop> (s, src, d, dst, length, CHECK); | |
166 } | |
150 } | 167 } |
151 | 168 |
152 | 169 |
153 klassOop objArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) { | 170 klassOop objArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) { |
154 objArrayKlassHandle h_this(THREAD, as_klassOop()); | 171 objArrayKlassHandle h_this(THREAD, as_klassOop()); |
240 | 257 |
241 objArrayKlass* oak = objArrayKlass::cast(k); | 258 objArrayKlass* oak = objArrayKlass::cast(k); |
242 return element_klass()->klass_part()->is_subtype_of(oak->element_klass()); | 259 return element_klass()->klass_part()->is_subtype_of(oak->element_klass()); |
243 } | 260 } |
244 | 261 |
245 | |
246 void objArrayKlass::initialize(TRAPS) { | 262 void objArrayKlass::initialize(TRAPS) { |
247 Klass::cast(bottom_klass())->initialize(THREAD); // dispatches to either instanceKlass or typeArrayKlass | 263 Klass::cast(bottom_klass())->initialize(THREAD); // dispatches to either instanceKlass or typeArrayKlass |
248 } | 264 } |
249 | 265 |
266 #define ObjArrayKlass_SPECIALIZED_OOP_ITERATE(T, a, p, do_oop) \ | |
267 { \ | |
268 T* p = (T*)(a)->base(); \ | |
269 T* const end = p + (a)->length(); \ | |
270 while (p < end) { \ | |
271 do_oop; \ | |
272 p++; \ | |
273 } \ | |
274 } | |
275 | |
276 #define ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(T, a, p, low, high, do_oop) \ | |
277 { \ | |
278 T* const l = (T*)(low); \ | |
279 T* const h = (T*)(high); \ | |
280 T* p = (T*)(a)->base(); \ | |
281 T* end = p + (a)->length(); \ | |
282 if (p < l) p = l; \ | |
283 if (end > h) end = h; \ | |
284 while (p < end) { \ | |
285 do_oop; \ | |
286 ++p; \ | |
287 } \ | |
288 } | |
289 | |
290 #define ObjArrayKlass_OOP_ITERATE(a, p, do_oop) \ | |
291 if (UseCompressedOops) { \ | |
292 ObjArrayKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ | |
293 a, p, do_oop) \ | |
294 } else { \ | |
295 ObjArrayKlass_SPECIALIZED_OOP_ITERATE(oop, \ | |
296 a, p, do_oop) \ | |
297 } | |
298 | |
299 #define ObjArrayKlass_BOUNDED_OOP_ITERATE(a, p, low, high, do_oop) \ | |
300 if (UseCompressedOops) { \ | |
301 ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ | |
302 a, p, low, high, do_oop) \ | |
303 } else { \ | |
304 ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ | |
305 a, p, low, high, do_oop) \ | |
306 } | |
250 | 307 |
251 void objArrayKlass::oop_follow_contents(oop obj) { | 308 void objArrayKlass::oop_follow_contents(oop obj) { |
252 assert (obj->is_array(), "obj must be array"); | 309 assert (obj->is_array(), "obj must be array"); |
253 arrayOop a = arrayOop(obj); | 310 objArrayOop a = objArrayOop(obj); |
254 a->follow_header(); | 311 a->follow_header(); |
255 oop* base = (oop*)a->base(T_OBJECT); | 312 ObjArrayKlass_OOP_ITERATE( \ |
256 oop* const end = base + a->length(); | 313 a, p, \ |
257 while (base < end) { | 314 /* we call mark_and_follow here to avoid excessive marking stack usage */ \ |
258 if (*base != NULL) | 315 MarkSweep::mark_and_follow(p)) |
259 // we call mark_and_follow here to avoid excessive marking stack usage | |
260 MarkSweep::mark_and_follow(base); | |
261 base++; | |
262 } | |
263 } | 316 } |
264 | 317 |
265 #ifndef SERIALGC | 318 #ifndef SERIALGC |
266 void objArrayKlass::oop_follow_contents(ParCompactionManager* cm, | 319 void objArrayKlass::oop_follow_contents(ParCompactionManager* cm, |
267 oop obj) { | 320 oop obj) { |
268 assert (obj->is_array(), "obj must be array"); | 321 assert (obj->is_array(), "obj must be array"); |
269 arrayOop a = arrayOop(obj); | 322 objArrayOop a = objArrayOop(obj); |
270 a->follow_header(cm); | 323 a->follow_header(cm); |
271 oop* base = (oop*)a->base(T_OBJECT); | 324 ObjArrayKlass_OOP_ITERATE( \ |
272 oop* const end = base + a->length(); | 325 a, p, \ |
273 while (base < end) { | 326 /* we call mark_and_follow here to avoid excessive marking stack usage */ \ |
274 if (*base != NULL) | 327 PSParallelCompact::mark_and_follow(cm, p)) |
275 // we call mark_and_follow here to avoid excessive marking stack usage | |
276 PSParallelCompact::mark_and_follow(cm, base); | |
277 base++; | |
278 } | |
279 } | 328 } |
280 #endif // SERIALGC | 329 #endif // SERIALGC |
281 | |
282 #define invoke_closure_on(base, closure, nv_suffix) { \ | |
283 if (*(base) != NULL) { \ | |
284 (closure)->do_oop##nv_suffix(base); \ | |
285 } \ | |
286 } | |
287 | 330 |
288 #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ | 331 #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ |
289 \ | 332 \ |
290 int objArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \ | 333 int objArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \ |
291 OopClosureType* closure) { \ | 334 OopClosureType* closure) { \ |
296 /* Don't call size() or oop_size() since that is a virtual call. */ \ | 339 /* Don't call size() or oop_size() since that is a virtual call. */ \ |
297 int size = a->object_size(); \ | 340 int size = a->object_size(); \ |
298 if (closure->do_header()) { \ | 341 if (closure->do_header()) { \ |
299 a->oop_iterate_header(closure); \ | 342 a->oop_iterate_header(closure); \ |
300 } \ | 343 } \ |
301 oop* base = a->base(); \ | 344 ObjArrayKlass_OOP_ITERATE(a, p, (closure)->do_oop##nv_suffix(p)) \ |
302 oop* const end = base + a->length(); \ | |
303 const intx field_offset = PrefetchFieldsAhead; \ | |
304 if (field_offset > 0) { \ | |
305 while (base < end) { \ | |
306 prefetch_beyond(base, end, field_offset, closure->prefetch_style()); \ | |
307 invoke_closure_on(base, closure, nv_suffix); \ | |
308 base++; \ | |
309 } \ | |
310 } else { \ | |
311 while (base < end) { \ | |
312 invoke_closure_on(base, closure, nv_suffix); \ | |
313 base++; \ | |
314 } \ | |
315 } \ | |
316 return size; \ | 345 return size; \ |
317 } | 346 } |
318 | 347 |
319 #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ | 348 #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ |
320 \ | 349 \ |
328 /* Don't call size() or oop_size() since that is a virtual call */ \ | 357 /* Don't call size() or oop_size() since that is a virtual call */ \ |
329 int size = a->object_size(); \ | 358 int size = a->object_size(); \ |
330 if (closure->do_header()) { \ | 359 if (closure->do_header()) { \ |
331 a->oop_iterate_header(closure, mr); \ | 360 a->oop_iterate_header(closure, mr); \ |
332 } \ | 361 } \ |
333 oop* bottom = (oop*)mr.start(); \ | 362 ObjArrayKlass_BOUNDED_OOP_ITERATE( \ |
334 oop* top = (oop*)mr.end(); \ | 363 a, p, mr.start(), mr.end(), (closure)->do_oop##nv_suffix(p)) \ |
335 oop* base = a->base(); \ | 364 return size; \ |
336 oop* end = base + a->length(); \ | 365 } |
337 if (base < bottom) { \ | 366 |
338 base = bottom; \ | 367 // Like oop_oop_iterate but only iterates over a specified range and only used |
339 } \ | 368 // for objArrayOops. |
340 if (end > top) { \ | 369 #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix) \ |
341 end = top; \ | 370 \ |
342 } \ | 371 int objArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, \ |
343 const intx field_offset = PrefetchFieldsAhead; \ | 372 OopClosureType* closure, \ |
344 if (field_offset > 0) { \ | 373 int start, int end) { \ |
345 while (base < end) { \ | 374 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \ |
346 prefetch_beyond(base, end, field_offset, closure->prefetch_style()); \ | 375 assert(obj->is_array(), "obj must be array"); \ |
347 invoke_closure_on(base, closure, nv_suffix); \ | 376 objArrayOop a = objArrayOop(obj); \ |
348 base++; \ | 377 /* Get size before changing pointers. */ \ |
378 /* Don't call size() or oop_size() since that is a virtual call */ \ | |
379 int size = a->object_size(); \ | |
380 if (UseCompressedOops) { \ | |
381 HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<narrowOop>(start);\ | |
382 /* this might be weird if end needs to be aligned on HeapWord boundary */ \ |
383 HeapWord* high = (HeapWord*)((narrowOop*)a->base() + end); \ | |
384 MemRegion mr(low, high); \ | |
385 if (closure->do_header()) { \ | |
386 a->oop_iterate_header(closure, mr); \ | |
349 } \ | 387 } \ |
388 ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ | |
389 a, p, low, high, (closure)->do_oop##nv_suffix(p)) \ | |
350 } else { \ | 390 } else { \ |
351 while (base < end) { \ | 391 HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<oop>(start); \ |
352 invoke_closure_on(base, closure, nv_suffix); \ | 392 HeapWord* high = (HeapWord*)((oop*)a->base() + end); \ |
353 base++; \ | 393 MemRegion mr(low, high); \ |
394 if (closure->do_header()) { \ | |
395 a->oop_iterate_header(closure, mr); \ | |
354 } \ | 396 } \ |
397 ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ | |
398 a, p, low, high, (closure)->do_oop##nv_suffix(p)) \ | |
355 } \ | 399 } \ |
356 return size; \ | 400 return size; \ |
357 } | 401 } |
358 | 402 |
359 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) | 403 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) |
360 ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) | 404 ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) |
361 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) | 405 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) |
362 ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) | 406 ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) |
407 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r) | |
408 ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r) | |
363 | 409 |
364 int objArrayKlass::oop_adjust_pointers(oop obj) { | 410 int objArrayKlass::oop_adjust_pointers(oop obj) { |
365 assert(obj->is_objArray(), "obj must be obj array"); | 411 assert(obj->is_objArray(), "obj must be obj array"); |
366 objArrayOop a = objArrayOop(obj); | 412 objArrayOop a = objArrayOop(obj); |
367 // Get size before changing pointers. | 413 // Get size before changing pointers. |
368 // Don't call size() or oop_size() since that is a virtual call. | 414 // Don't call size() or oop_size() since that is a virtual call. |
369 int size = a->object_size(); | 415 int size = a->object_size(); |
370 a->adjust_header(); | 416 a->adjust_header(); |
371 oop* base = a->base(); | 417 ObjArrayKlass_OOP_ITERATE(a, p, MarkSweep::adjust_pointer(p)) |
372 oop* const end = base + a->length(); | |
373 while (base < end) { | |
374 MarkSweep::adjust_pointer(base); | |
375 base++; | |
376 } | |
377 return size; | 418 return size; |
378 } | 419 } |
379 | 420 |
380 #ifndef SERIALGC | 421 #ifndef SERIALGC |
381 void objArrayKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) { | 422 void objArrayKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) { |
382 assert(!pm->depth_first(), "invariant"); | 423 assert(!pm->depth_first(), "invariant"); |
383 assert(obj->is_objArray(), "obj must be obj array"); | 424 assert(obj->is_objArray(), "obj must be obj array"); |
384 // Compute oop range | 425 ObjArrayKlass_OOP_ITERATE( \ |
385 oop* curr = objArrayOop(obj)->base(); | 426 objArrayOop(obj), p, \ |
386 oop* end = curr + objArrayOop(obj)->length(); | 427 if (PSScavenge::should_scavenge(p)) { \ |
387 // assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size"); | 428 pm->claim_or_forward_breadth(p); \ |
388 assert(align_object_size(pointer_delta(end, obj, sizeof(oop*))) | 429 }) |
389 == oop_size(obj), "checking size"); | |
390 | |
391 // Iterate over oops | |
392 while (curr < end) { | |
393 if (PSScavenge::should_scavenge(*curr)) { | |
394 pm->claim_or_forward_breadth(curr); | |
395 } | |
396 ++curr; | |
397 } | |
398 } | 430 } |
399 | 431 |
400 void objArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { | 432 void objArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { |
401 assert(pm->depth_first(), "invariant"); | 433 assert(pm->depth_first(), "invariant"); |
402 assert(obj->is_objArray(), "obj must be obj array"); | 434 assert(obj->is_objArray(), "obj must be obj array"); |
403 // Compute oop range | 435 ObjArrayKlass_OOP_ITERATE( \ |
404 oop* curr = objArrayOop(obj)->base(); | 436 objArrayOop(obj), p, \ |
405 oop* end = curr + objArrayOop(obj)->length(); | 437 if (PSScavenge::should_scavenge(p)) { \ |
406 // assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size"); | 438 pm->claim_or_forward_depth(p); \ |
407 assert(align_object_size(pointer_delta(end, obj, sizeof(oop*))) | 439 }) |
408 == oop_size(obj), "checking size"); | |
409 | |
410 // Iterate over oops | |
411 while (curr < end) { | |
412 if (PSScavenge::should_scavenge(*curr)) { | |
413 pm->claim_or_forward_depth(curr); | |
414 } | |
415 ++curr; | |
416 } | |
417 } | 440 } |
418 | 441 |
419 int objArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { | 442 int objArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { |
420 assert (obj->is_objArray(), "obj must be obj array"); | 443 assert (obj->is_objArray(), "obj must be obj array"); |
421 objArrayOop a = objArrayOop(obj); | 444 objArrayOop a = objArrayOop(obj); |
422 | 445 ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p)) |
423 oop* const base = a->base(); | |
424 oop* const beg_oop = base; | |
425 oop* const end_oop = base + a->length(); | |
426 for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) { | |
427 PSParallelCompact::adjust_pointer(cur_oop); | |
428 } | |
429 return a->object_size(); | 446 return a->object_size(); |
430 } | 447 } |
431 | 448 |
432 int objArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj, | 449 int objArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj, |
433 HeapWord* beg_addr, HeapWord* end_addr) { | 450 HeapWord* beg_addr, HeapWord* end_addr) { |
434 assert (obj->is_objArray(), "obj must be obj array"); | 451 assert (obj->is_objArray(), "obj must be obj array"); |
435 objArrayOop a = objArrayOop(obj); | 452 objArrayOop a = objArrayOop(obj); |
436 | 453 ObjArrayKlass_BOUNDED_OOP_ITERATE( \ |
437 oop* const base = a->base(); | 454 a, p, beg_addr, end_addr, \ |
438 oop* const beg_oop = MAX2((oop*)beg_addr, base); | 455 PSParallelCompact::adjust_pointer(p)) |
439 oop* const end_oop = MIN2((oop*)end_addr, base + a->length()); | |
440 for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) { | |
441 PSParallelCompact::adjust_pointer(cur_oop); | |
442 } | |
443 return a->object_size(); | 456 return a->object_size(); |
444 } | 457 } |
445 #endif // SERIALGC | 458 #endif // SERIALGC |
446 | 459 |
447 // JVM support | 460 // JVM support |
507 void objArrayKlass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) { | 520 void objArrayKlass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) { |
508 /* $$$ move into remembered set verification? | 521 /* $$$ move into remembered set verification? |
509 RememberedSet::verify_old_oop(obj, p, allow_dirty, true); | 522 RememberedSet::verify_old_oop(obj, p, allow_dirty, true); |
510 */ | 523 */ |
511 } | 524 } |
525 void objArrayKlass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) {} |