comparison src/share/vm/oops/oop.inline.hpp @ 360:5d254928c888
Merge
author | ysr |
date | Wed, 27 Aug 2008 11:20:46 -0700 |
parents | 1ee8caae33af |
children | df4305d4c1a1 |
341:d60e4e6d7f72 | 360:5d254928c888 |
---|---|
393 // skipping the intermediate round to HeapWordSize. Cast the result | 393 // skipping the intermediate round to HeapWordSize. Cast the result |
394 // of round_to to size_t to guarantee unsigned division == right shift. | 394 // of round_to to size_t to guarantee unsigned division == right shift. |
395 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) / | 395 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) / |
396 HeapWordSize); | 396 HeapWordSize); |
397 | 397 |
398 // UseParNewGC can change the length field of an "old copy" of an object | 398 // UseParNewGC, UseParallelGC and UseG1GC can change the length field |
399 // array in the young gen so it indicates the stealable portion of | 399 // of an "old copy" of an object array in the young gen so it indicates |
400 // an already copied array. This will cause the first disjunct below | 400 // the grey portion of an already copied array. This will cause the first |
401 // to fail if the sizes are computed across such a concurrent change. | 401 // disjunct below to fail if the two comparands are computed across such |
| 402 // a concurrent change. |
402 // UseParNewGC also runs with promotion labs (which look like int | 403 // UseParNewGC also runs with promotion labs (which look like int |
403 // filler arrays) which are subject to changing their declared size | 404 // filler arrays) which are subject to changing their declared size |
404 // when finally retiring a PLAB; this also can cause the first disjunct | 405 // when finally retiring a PLAB; this also can cause the first disjunct |
405 // to fail for another worker thread that is concurrently walking the block | 406 // to fail for another worker thread that is concurrently walking the block |
406 // offset table. Both these invariant failures are benign for their | 407 // offset table. Both these invariant failures are benign for their |
407 // current uses; we relax the assertion checking to cover these two cases below: | 408 // current uses; we relax the assertion checking to cover these two cases below: |
408 // is_objArray() && is_forwarded() // covers first scenario above | 409 // is_objArray() && is_forwarded() // covers first scenario above |
409 // || is_typeArray() // covers second scenario above | 410 // || is_typeArray() // covers second scenario above |
410 // If and when UseParallelGC uses the same obj array oop stealing/chunking | 411 // If and when UseParallelGC uses the same obj array oop stealing/chunking |
411 // technique, or when G1 is integrated (and currently uses this array chunking | 412 // technique, we will need to suitably modify the assertion. |
412 // technique) we will need to suitably modify the assertion. | |
413 assert((s == klass->oop_size(this)) || | 413 assert((s == klass->oop_size(this)) || |
414 (((UseParNewGC || UseParallelGC) && | 414 (Universe::heap()->is_gc_active() && |
415 Universe::heap()->is_gc_active()) && | 415 ((is_typeArray() && UseParNewGC) || |
416 (is_typeArray() || | 416 (is_objArray() && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))), |
417 (is_objArray() && is_forwarded()))), | |
418 "wrong array object size"); | 417 "wrong array object size"); |
419 } else { | 418 } else { |
420 // Must be zero, so bite the bullet and take the virtual call. | 419 // Must be zero, so bite the bullet and take the virtual call. |
421 s = klass->oop_size(this); | 420 s = klass->oop_size(this); |
422 } | 421 } |
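
The size computation at the top of this hunk rounds the byte size up to the object alignment and divides by the heap word size. A minimal standalone sketch of that arithmetic, with assumed values for HeapWordSize and MinObjAlignmentInBytes (the VM derives the real values from its configuration):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>

// Assumed values for illustration only; not the VM's definitions.
const size_t HeapWordSize            = 8;  // assumption: 64-bit heap words
const size_t MinObjAlignmentInBytes  = 8;  // assumption: default object alignment

// Round up to a power-of-two alignment, as the VM's round_to() does.
inline size_t round_to(size_t x, size_t align) {
  return (x + align - 1) & ~(align - 1);
}

int main() {
  size_t size_in_bytes = 20;
  // Casting to size_t first makes the division unsigned, so dividing by the
  // power-of-two HeapWordSize can compile down to a right shift.
  int s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);
  assert(s == 3);  // 20 bytes rounds up to 24 bytes, i.e. 3 heap words
  printf("object size: %d heap words\n", s);
  return 0;
}
```
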
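The reworked assertion at the end of the hunk tolerates a size mismatch only while a GC is active, and only for the two benign races the comment describes. A standalone sketch of that predicate; the struct fields and flag parameters are stand-ins for the oopDesc queries and VM flags shown in the diff, not HotSpot API:

```cpp
// Stand-in state for one object, mirroring the queries used in the assert.
struct ObjState {
  bool size_matches_klass;  // s == klass->oop_size(this)
  bool gc_active;           // Universe::heap()->is_gc_active()
  bool is_type_array;       // is_typeArray(): retiring-PLAB scenario
  bool is_obj_array;        // is_objArray()
  bool is_forwarded;        // is_forwarded(): chunked "old copy" scenario
};

bool size_assert_holds(const ObjState& o,
                       bool UseParNewGC, bool UseParallelGC, bool UseG1GC) {
  // A mismatch is tolerated only while a GC is in progress, and only for the
  // two benign races described in the comment: a retiring PLAB (type array
  // under ParNew) or a partially copied, forwarded object array.
  return o.size_matches_klass ||
         (o.gc_active &&
          ((o.is_type_array && UseParNewGC) ||
           (o.is_obj_array && o.is_forwarded &&
            (UseParNewGC || UseParallelGC || UseG1GC))));
}

int main() {
  ObjState o{false, true, false, true, true};  // forwarded obj array during GC
  return size_assert_holds(o, /*UseParNewGC*/ false,
                           /*UseParallelGC*/ false, /*UseG1GC*/ true) ? 0 : 1;
}
```
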
439 inline void update_barrier_set(void* p, oop v) { | 438 inline void update_barrier_set(void* p, oop v) { |
440 assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!"); | 439 assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!"); |
441 oopDesc::bs()->write_ref_field(p, v); | 440 oopDesc::bs()->write_ref_field(p, v); |
442 } | 441 } |
443 | 442 |
| 443 inline void update_barrier_set_pre(void* p, oop v) { |
| 444 oopDesc::bs()->write_ref_field_pre(p, v); |
| 445 } |
| 446 |
444 template <class T> inline void oop_store(T* p, oop v) { | 447 template <class T> inline void oop_store(T* p, oop v) { |
445 if (always_do_update_barrier) { | 448 if (always_do_update_barrier) { |
446 oop_store((volatile T*)p, v); | 449 oop_store((volatile T*)p, v); |
447 } else { | 450 } else { |
| 451 update_barrier_set_pre(p, v); |
448 oopDesc::encode_store_heap_oop(p, v); | 452 oopDesc::encode_store_heap_oop(p, v); |
449 update_barrier_set(p, v); | 453 update_barrier_set(p, v); |
450 } | 454 } |
451 } | 455 } |
452 | 456 |
453 template <class T> inline void oop_store(volatile T* p, oop v) { | 457 template <class T> inline void oop_store(volatile T* p, oop v) { |
| 458 update_barrier_set_pre((void*)p, v); |
454 // Used by release_obj_field_put, so use release_store_ptr. | 459 // Used by release_obj_field_put, so use release_store_ptr. |
455 oopDesc::release_encode_store_heap_oop(p, v); | 460 oopDesc::release_encode_store_heap_oop(p, v); |
456 update_barrier_set((void*)p, v); | 461 update_barrier_set((void*)p, v); |
457 } | 462 } |
458 | 463 |
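
The new oop_store sequence in this hunk invokes the pre-write barrier before the reference field is overwritten and the post-write barrier after. A standalone sketch of that ordering with stub hooks; the hook bodies only describe what a SATB-style pre-barrier (as in G1) and a card-marking post-barrier typically do, and are not the VM's BarrierSet implementation:

```cpp
typedef void* oop_t;

static void write_ref_field_pre(oop_t* field, oop_t /*new_val*/) {
  // A SATB-style collector (e.g. G1) would log the value currently in *field
  // so a concurrent marker does not lose the object being overwritten.
}

static void write_ref_field(oop_t* field, oop_t /*new_val*/) {
  // A generational collector would dirty the card covering 'field' so the
  // next young collection rescans it for old-to-young references.
}

static void oop_store_sketch(oop_t* field, oop_t new_val) {
  write_ref_field_pre(field, new_val);  // pre-barrier: old value still visible
  *field = new_val;                     // the actual reference store
  write_ref_field(field, new_val);      // post-barrier: after the new value lands
}

int main() {
  oop_t field = nullptr;
  int payload = 0;
  oop_store_sketch(&field, &payload);  // field now refers to payload
  return 0;
}
```

Running the pre-barrier before the store matters because it is the only point at which the value being overwritten is still reachable through the field.
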
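The volatile overload additionally publishes the reference with release semantics, so a concurrent reader that observes the new pointer also observes the stores that initialized the referent. A standalone sketch of that publication pattern using std::atomic, with the barrier calls omitted to isolate the ordering point:

```cpp
#include <atomic>

struct Payload { int x; };

std::atomic<Payload*> g_field{nullptr};

void publish(Payload* p) {
  p->x = 42;                                    // initialize the referent first
  g_field.store(p, std::memory_order_release);  // then publish with release
}

Payload* observe() {
  // A matching acquire load pairs with the release store: a reader that sees
  // the pointer also sees the initializing store to x.
  return g_field.load(std::memory_order_acquire);
}

int main() {
  static Payload p;
  publish(&p);
  Payload* seen = observe();
  return (seen != nullptr && seen->x == 42) ? 0 : 1;
}
```
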
696 SpecializationStats::record_call(); \ | 701 SpecializationStats::record_call(); \ |
697 return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \ | 702 return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \ |
698 } | 703 } |
699 | 704 |
700 ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN) | 705 ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN) |
701 ALL_OOP_OOP_ITERATE_CLOSURES_3(OOP_ITERATE_DEFN) | 706 ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN) |
702 | 707 |
| 708 #ifndef SERIALGC |
| 709 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ |
| 710 \ |
| 711 inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \ |
| 712 SpecializationStats::record_call(); \ |
| 713 return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk); \ |
| 714 } |
| 715 |
| 716 ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN) |
| 717 ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN) |
| 718 #endif // !SERIALGC |
703 | 719 |
704 inline bool oopDesc::is_shared() const { | 720 inline bool oopDesc::is_shared() const { |
705 return CompactingPermGenGen::is_shared(this); | 721 return CompactingPermGenGen::is_shared(this); |
706 } | 722 } |
707 | 723 |
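
The oop_iterate and new oop_iterate_backwards definitions above are stamped out once per closure type by applying a definition macro to a closure-list macro, giving each closure its own non-virtual entry point. A standalone sketch of that stamp-out pattern with placeholder closure names; the VM's real lists are the ALL_OOP_OOP_ITERATE_CLOSURES_* macros:

```cpp
#include <cstdio>

// Placeholder closures for illustration only.
struct ClosureA { void do_oop(void** /*p*/) { std::puts("ClosureA"); } };
struct ClosureB { void do_oop(void** /*p*/) { std::puts("ClosureB"); } };

// Closure-list macro: applies a definition macro to every closure type.
#define ALL_CLOSURES(f) f(ClosureA) f(ClosureB)

// One application stamps out a dedicated, non-virtual overload per closure
// type, mirroring the role of OOP_ITERATE_DEFN above.
#define ITERATE_DEFN(ClosureType)                     \
  inline int iterate(void** slot, ClosureType* blk) { \
    blk->do_oop(slot);                                \
    return 1;                                         \
  }

ALL_CLOSURES(ITERATE_DEFN)

int main() {
  void* slot = nullptr;
  ClosureA a; ClosureB b;
  iterate(&slot, &a);  // resolves to the overload stamped out for ClosureA
  iterate(&slot, &b);
  return 0;
}
```
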