comparison src/share/vm/oops/oop.inline.hpp @ 342:37f87013dfd8

6711316: Open source the Garbage-First garbage collector
Summary: First mercurial integration of the code for the Garbage-First garbage collector.
Reviewed-by: apetrusenko, iveresov, jmasa, sgoldman, tonyp, ysr
author ysr
date Thu, 05 Jun 2008 15:57:56 -0700
parents b7268662a986
children 6aae2f9d0294
comparison
equal deleted inserted replaced
189:0b27f3512f9e 342:37f87013dfd8
378 // skipping the intermediate round to HeapWordSize. Cast the result 378 // skipping the intermediate round to HeapWordSize. Cast the result
379 // of round_to to size_t to guarantee unsigned division == right shift. 379 // of round_to to size_t to guarantee unsigned division == right shift.
380 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) / 380 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
381 HeapWordSize); 381 HeapWordSize);
382 382
383 // UseParNewGC can change the length field of an "old copy" of an object 383 // UseParNewGC, UseParallelGC and UseG1GC can change the length field
384 // array in the young gen so it indicates the stealable portion of 384 // of an "old copy" of an object array in the young gen so it indicates
385 // an already copied array. This will cause the first disjunct below 385 // the grey portion of an already copied array. This will cause the first
386 // to fail if the sizes are computed across such a concurrent change. 386 // disjunct below to fail if the two comparands are computed across such
387 // a concurrent change.
387 // UseParNewGC also runs with promotion labs (which look like int 388 // UseParNewGC also runs with promotion labs (which look like int
388 // filler arrays) which are subject to changing their declared size 389 // filler arrays) which are subject to changing their declared size
389 // when finally retiring a PLAB; this also can cause the first disjunct 390 // when finally retiring a PLAB; this also can cause the first disjunct
390 // to fail for another worker thread that is concurrently walking the block 391 // to fail for another worker thread that is concurrently walking the block
391 // offset table. Both these invariant failures are benign for their 392 // offset table. Both these invariant failures are benign for their
392 // current uses; we relax the assertion checking to cover these two cases below: 393 // current uses; we relax the assertion checking to cover these two cases below:
393 // is_objArray() && is_forwarded() // covers first scenario above 394 // is_objArray() && is_forwarded() // covers first scenario above
394 // || is_typeArray() // covers second scenario above 395 // || is_typeArray() // covers second scenario above
395 // If and when UseParallelGC uses the same obj array oop stealing/chunking 396 // If and when UseParallelGC uses the same obj array oop stealing/chunking
396 // technique, or when G1 is integrated (and currently uses this array chunking 397 // technique, we will need to suitably modify the assertion.
397 // technique) we will need to suitably modify the assertion.
398 assert((s == klass->oop_size(this)) || 398 assert((s == klass->oop_size(this)) ||
399 (((UseParNewGC || UseParallelGC) && 399 (Universe::heap()->is_gc_active() &&
400 Universe::heap()->is_gc_active()) && 400 ((is_typeArray() && UseParNewGC) ||
401 (is_typeArray() || 401 (is_objArray() && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
402 (is_objArray() && is_forwarded()))),
403 "wrong array object size"); 402 "wrong array object size");
404 } else { 403 } else {
405 // Must be zero, so bite the bullet and take the virtual call. 404 // Must be zero, so bite the bullet and take the virtual call.
406 s = klass->oop_size(this); 405 s = klass->oop_size(this);
407 } 406 }
424 inline void update_barrier_set(void* p, oop v) { 423 inline void update_barrier_set(void* p, oop v) {
425 assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!"); 424 assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
426 oopDesc::bs()->write_ref_field(p, v); 425 oopDesc::bs()->write_ref_field(p, v);
427 } 426 }
428 427
428 inline void update_barrier_set_pre(void* p, oop v) {
429 oopDesc::bs()->write_ref_field_pre(p, v);
430 }
431
429 template <class T> inline void oop_store(T* p, oop v) { 432 template <class T> inline void oop_store(T* p, oop v) {
430 if (always_do_update_barrier) { 433 if (always_do_update_barrier) {
431 oop_store((volatile T*)p, v); 434 oop_store((volatile T*)p, v);
432 } else { 435 } else {
436 update_barrier_set_pre(p, v);
433 oopDesc::encode_store_heap_oop(p, v); 437 oopDesc::encode_store_heap_oop(p, v);
434 update_barrier_set(p, v); 438 update_barrier_set(p, v);
435 } 439 }
436 } 440 }
437 441
438 template <class T> inline void oop_store(volatile T* p, oop v) { 442 template <class T> inline void oop_store(volatile T* p, oop v) {
443 update_barrier_set_pre((void*)p, v);
439 // Used by release_obj_field_put, so use release_store_ptr. 444 // Used by release_obj_field_put, so use release_store_ptr.
440 oopDesc::release_encode_store_heap_oop(p, v); 445 oopDesc::release_encode_store_heap_oop(p, v);
441 update_barrier_set((void*)p, v); 446 update_barrier_set((void*)p, v);
442 } 447 }
443 448
681 SpecializationStats::record_call(); \ 686 SpecializationStats::record_call(); \
682 return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \ 687 return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \
683 } 688 }
684 689
685 ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN) 690 ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
686 ALL_OOP_OOP_ITERATE_CLOSURES_3(OOP_ITERATE_DEFN) 691 ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)
687 692
693 #ifndef SERIALGC
694 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
695 \
696 inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \
697 SpecializationStats::record_call(); \
698 return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk); \
699 }
700
701 ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
702 ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
703 #endif // !SERIALGC
688 704
689 inline bool oopDesc::is_shared() const { 705 inline bool oopDesc::is_shared() const {
690 return CompactingPermGenGen::is_shared(this); 706 return CompactingPermGenGen::is_shared(this);
691 } 707 }
692 708