comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 845:df6caf649ff7

6700789: G1: Enable use of compressed oops with G1 heaps
Summary: Modifications to G1 so as to allow the use of compressed oops.
Reviewed-by: apetrusenko, coleenp, jmasa, kvn, never, phh, tonyp
author ysr
date Tue, 14 Jul 2009 15:40:39 -0700
parents 0316eac49d5a
children 42d84bbbecf4
compared revisions: 839:bb18957ad21e and 845:df6caf649ff7
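The bulk of this changeset replaces oop-only closure bodies (the old do_oop(narrowOop*) { guarantee(false, "NYI"); } stubs) with a single templated do_oop_work that both the oop* and narrowOop* overloads forward to, going through helpers such as oopDesc::load_decode_heap_oop and oopDesc::encode_store_heap_oop so one body handles full-width and compressed references. Below is a minimal standalone sketch of that dispatch shape; the Object/oop/narrowOop types and the encode/decode/load_decode helpers are simplified stand-ins invented for the example, not the HotSpot oopDesc API.

#include <cassert>
#include <cstdint>
#include <iostream>

// Simplified stand-ins for HotSpot types: a full-width reference and a
// 32-bit "compressed" reference stored as an offset from a heap base.
struct Object { int payload; };
typedef Object*  oop;         // full-width oop
typedef uint32_t narrowOop;   // compressed oop

static Object toy_heap[16];                 // toy heap
static Object* const heap_base = toy_heap;

// Hypothetical encode/decode helpers modelling the compressed-oop scheme
// (0 is reserved for null); not the oopDesc API.
static narrowOop encode(oop o) { return o == nullptr ? 0 : narrowOop(o - heap_base) + 1; }
static oop decode(narrowOop n) { return n == 0 ? nullptr : heap_base + (n - 1); }

// Load through either kind of slot, decoding only when the slot is narrow.
static oop load_decode(oop* p)       { return *p; }
static oop load_decode(narrowOop* p) { return decode(*p); }

// The closure pattern introduced by the changeset: both overloads forward
// to one template that contains the real logic.
class PrintLiveClosure {
public:
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
private:
  template <class T> void do_oop_work(T* p) {
    oop obj = load_decode(p);
    if (obj != nullptr) {
      std::cout << "live object with payload " << obj->payload << "\n";
    }
  }
};

int main() {
  toy_heap[3].payload = 42;
  oop wide_slot = &toy_heap[3];
  narrowOop narrow_slot = encode(&toy_heap[3]);

  PrintLiveClosure cl;
  cl.do_oop(&wide_slot);     // full-width slot
  cl.do_oop(&narrow_slot);   // compressed slot, decoded before use
  assert(decode(encode(wide_slot)) == wide_slot);
  return 0;
}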
1653 1653
1654 1654
1655 // Computes the sum of the storage used by the various regions. 1655 // Computes the sum of the storage used by the various regions.
1656 1656
1657 size_t G1CollectedHeap::used() const { 1657 size_t G1CollectedHeap::used() const {
1658 assert(Heap_lock->owner() != NULL, 1658 // Temporarily, until 6859911 is fixed. XXX
1659 "Should be owned on this thread's behalf."); 1659 // assert(Heap_lock->owner() != NULL,
1660 // "Should be owned on this thread's behalf.");
1660 size_t result = _summary_bytes_used; 1661 size_t result = _summary_bytes_used;
1661 if (_cur_alloc_region != NULL) 1662 // Read only once in case it is set to NULL concurrently
1662 result += _cur_alloc_region->used(); 1663 HeapRegion* hr = _cur_alloc_region;
1664 if (hr != NULL)
1665 result += hr->used();
1663 return result; 1666 return result;
1664 } 1667 }
1665 1668
1666 class SumUsedClosure: public HeapRegionClosure { 1669 class SumUsedClosure: public HeapRegionClosure {
1667 size_t _used; 1670 size_t _used;
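In the new used() above, _cur_alloc_region is read into a local exactly once before the NULL check, so the check and the subsequent hr->used() call cannot see two different values if another thread clears the field concurrently. A small sketch of that read-once idiom, assuming (purely for illustration) an atomic field and toy class names rather than the actual G1CollectedHeap declarations:

#include <atomic>
#include <cstddef>
#include <iostream>

// Toy stand-ins for HeapRegion / G1CollectedHeap.
struct HeapRegion {
  size_t used() const { return 1024; }   // pretend this region has 1 KB in use
};

struct Heap {
  std::atomic<HeapRegion*> cur_alloc_region{nullptr};
  size_t summary_bytes_used = 0;

  size_t used() const {
    size_t result = summary_bytes_used;
    // Read the field only once: re-reading it after the NULL check could
    // observe NULL if another thread retires the region in between.
    HeapRegion* hr = cur_alloc_region.load();
    if (hr != nullptr)
      result += hr->used();
    return result;
  }
};

int main() {
  Heap heap;
  HeapRegion region;
  heap.summary_bytes_used = 4096;
  heap.cur_alloc_region.store(&region);
  std::cout << "used bytes: " << heap.used() << "\n";   // 4096 + 1024
  return 0;
}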
2131 G1CollectedHeap* g1h; 2134 G1CollectedHeap* g1h;
2132 public: 2135 public:
2133 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { 2136 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
2134 g1h = _g1h; 2137 g1h = _g1h;
2135 } 2138 }
2136 void do_oop(narrowOop *p) { 2139 void do_oop(narrowOop *p) { do_oop_work(p); }
2137 guarantee(false, "NYI"); 2140 void do_oop( oop *p) { do_oop_work(p); }
2138 } 2141
2139 void do_oop(oop *p) { 2142 template <class T> void do_oop_work(T *p) {
2140 oop obj = *p; 2143 oop obj = oopDesc::load_decode_heap_oop(p);
2141 assert(obj == NULL || !g1h->is_obj_dead(obj), 2144 guarantee(obj == NULL || !g1h->is_obj_dead(obj),
2142 "Dead object referenced by a not dead object"); 2145 "Dead object referenced by a not dead object");
2143 } 2146 }
2144 }; 2147 };
2145 2148
2146 class VerifyObjsInRegionClosure: public ObjectClosure { 2149 class VerifyObjsInRegionClosure: public ObjectClosure {
2147 private: 2150 private:
2204 bool _use_prev_marking; 2207 bool _use_prev_marking;
2205 public: 2208 public:
2206 // use_prev_marking == true -> use "prev" marking information, 2209 // use_prev_marking == true -> use "prev" marking information,
2207 // use_prev_marking == false -> use "next" marking information 2210 // use_prev_marking == false -> use "next" marking information
2208 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) 2211 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
2209 : _allow_dirty(allow_dirty), _par(par), 2212 : _allow_dirty(allow_dirty),
2213 _par(par),
2210 _use_prev_marking(use_prev_marking) {} 2214 _use_prev_marking(use_prev_marking) {}
2215
2211 bool doHeapRegion(HeapRegion* r) { 2216 bool doHeapRegion(HeapRegion* r) {
2212 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, 2217 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
2213 "Should be unclaimed at verify points."); 2218 "Should be unclaimed at verify points.");
2214 if (!r->continuesHumongous()) { 2219 if (!r->continuesHumongous()) {
2215 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); 2220 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
2229 bool _use_prev_marking; 2234 bool _use_prev_marking;
2230 public: 2235 public:
2231 // use_prev_marking == true -> use "prev" marking information, 2236 // use_prev_marking == true -> use "prev" marking information,
2232 // use_prev_marking == false -> use "next" marking information 2237 // use_prev_marking == false -> use "next" marking information
2233 VerifyRootsClosure(bool use_prev_marking) : 2238 VerifyRootsClosure(bool use_prev_marking) :
2234 _g1h(G1CollectedHeap::heap()), _failures(false), 2239 _g1h(G1CollectedHeap::heap()),
2240 _failures(false),
2235 _use_prev_marking(use_prev_marking) { } 2241 _use_prev_marking(use_prev_marking) { }
2236 2242
2237 bool failures() { return _failures; } 2243 bool failures() { return _failures; }
2238 2244
2239 void do_oop(narrowOop* p) { 2245 template <class T> void do_oop_nv(T* p) {
2240 guarantee(false, "NYI"); 2246 T heap_oop = oopDesc::load_heap_oop(p);
2241 } 2247 if (!oopDesc::is_null(heap_oop)) {
2242 2248 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2243 void do_oop(oop* p) {
2244 oop obj = *p;
2245 if (obj != NULL) {
2246 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { 2249 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
2247 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " 2250 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
2248 "points to dead obj "PTR_FORMAT, p, (void*) obj); 2251 "points to dead obj "PTR_FORMAT, p, (void*) obj);
2249 obj->print_on(gclog_or_tty); 2252 obj->print_on(gclog_or_tty);
2250 _failures = true; 2253 _failures = true;
2251 } 2254 }
2252 } 2255 }
2253 } 2256 }
2257
2258 void do_oop(oop* p) { do_oop_nv(p); }
2259 void do_oop(narrowOop* p) { do_oop_nv(p); }
2254 }; 2260 };
2255 2261
2256 // This is the task used for parallel heap verification. 2262 // This is the task used for parallel heap verification.
2257 2263
2258 class G1ParVerifyTask: public AbstractGangTask { 2264 class G1ParVerifyTask: public AbstractGangTask {
2265 // use_prev_marking == true -> use "prev" marking information, 2271 // use_prev_marking == true -> use "prev" marking information,
2266 // use_prev_marking == false -> use "next" marking information 2272 // use_prev_marking == false -> use "next" marking information
2267 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, 2273 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
2268 bool use_prev_marking) : 2274 bool use_prev_marking) :
2269 AbstractGangTask("Parallel verify task"), 2275 AbstractGangTask("Parallel verify task"),
2270 _g1h(g1h), _allow_dirty(allow_dirty), 2276 _g1h(g1h),
2277 _allow_dirty(allow_dirty),
2271 _use_prev_marking(use_prev_marking) { } 2278 _use_prev_marking(use_prev_marking) { }
2272 2279
2273 void work(int worker_i) { 2280 void work(int worker_i) {
2274 HandleMark hm; 2281 HandleMark hm;
2275 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); 2282 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
2477 } 2484 }
2478 } 2485 }
2479 2486
2480 void 2487 void
2481 G1CollectedHeap::doConcurrentMark() { 2488 G1CollectedHeap::doConcurrentMark() {
2482 if (G1ConcMark) { 2489 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2483 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); 2490 if (!_cmThread->in_progress()) {
2484 if (!_cmThread->in_progress()) { 2491 _cmThread->set_started();
2485 _cmThread->set_started(); 2492 CGC_lock->notify();
2486 CGC_lock->notify();
2487 }
2488 } 2493 }
2489 } 2494 }
2490 2495
2491 class VerifyMarkedObjsClosure: public ObjectClosure { 2496 class VerifyMarkedObjsClosure: public ObjectClosure {
2492 G1CollectedHeap* _g1h; 2497 G1CollectedHeap* _g1h;
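doConcurrentMark() above now runs unconditionally (the G1ConcMark guard is gone): it takes CGC_lock with no safepoint check, marks the concurrent-mark thread as started if it is not already in progress, and notifies it; as the later CAUTION comment in this changeset notes, marking itself only begins once the safepoint is released. A minimal sketch of that signal-under-lock handoff, using standard C++ primitives as stand-ins for HotSpot's Mutex/MutexLockerEx and the marking thread's run loop:

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex cgc_lock;
std::condition_variable cgc_cv;
bool marking_started = false;

// Stand-in for the concurrent marking thread's run loop.
void concurrent_mark_thread() {
  std::unique_lock<std::mutex> x(cgc_lock);
  cgc_cv.wait(x, [] { return marking_started; });   // sleeps until notified
  std::cout << "concurrent marking cycle begins\n";
  marking_started = false;                           // one-shot for this sketch
}

// Equivalent shape to G1CollectedHeap::doConcurrentMark(): set the flag and
// notify while holding the lock, so the wakeup cannot be lost.
void do_concurrent_mark() {
  std::lock_guard<std::mutex> x(cgc_lock);
  if (!marking_started) {            // only kick it if not already in progress
    marking_started = true;
    cgc_cv.notify_one();
  }
}

int main() {
  std::thread marker(concurrent_mark_thread);
  do_concurrent_mark();
  marker.join();
  return 0;
}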
2559 if (_surviving_young_words == NULL) { 2564 if (_surviving_young_words == NULL) {
2560 vm_exit_out_of_memory(sizeof(size_t) * array_length, 2565 vm_exit_out_of_memory(sizeof(size_t) * array_length,
2561 "Not enough space for young surv words summary."); 2566 "Not enough space for young surv words summary.");
2562 } 2567 }
2563 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); 2568 memset(_surviving_young_words, 0, array_length * sizeof(size_t));
2569 #ifdef ASSERT
2564 for (size_t i = 0; i < array_length; ++i) { 2570 for (size_t i = 0; i < array_length; ++i) {
2565 guarantee( _surviving_young_words[i] == 0, "invariant" ); 2571 assert( _surviving_young_words[i] == 0, "memset above" );
2566 } 2572 }
2573 #endif // !ASSERT
2567 } 2574 }
2568 2575
2569 void 2576 void
2570 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { 2577 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
2571 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 2578 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
2647 } 2654 }
2648 2655
2649 COMPILER2_PRESENT(DerivedPointerTable::clear()); 2656 COMPILER2_PRESENT(DerivedPointerTable::clear());
2650 2657
2651 // We want to turn off ref discovery, if necessary, and turn it back on 2658 // We want to turn off ref discovery, if necessary, and turn it back on
2652 // on again later if we do. 2659 // on again later if we do. XXX Dubious: why is discovery disabled?
2653 bool was_enabled = ref_processor()->discovery_enabled(); 2660 bool was_enabled = ref_processor()->discovery_enabled();
2654 if (was_enabled) ref_processor()->disable_discovery(); 2661 if (was_enabled) ref_processor()->disable_discovery();
2655 2662
2656 // Forget the current alloc region (we might even choose it to be part 2663 // Forget the current alloc region (we might even choose it to be part
2657 // of the collection set!). 2664 // of the collection set!).
2660 // The elapsed time induced by the start time below deliberately elides 2667 // The elapsed time induced by the start time below deliberately elides
2661 // the possible verification above. 2668 // the possible verification above.
2662 double start_time_sec = os::elapsedTime(); 2669 double start_time_sec = os::elapsedTime();
2663 GCOverheadReporter::recordSTWStart(start_time_sec); 2670 GCOverheadReporter::recordSTWStart(start_time_sec);
2664 size_t start_used_bytes = used(); 2671 size_t start_used_bytes = used();
2665 if (!G1ConcMark) {
2666 do_sync_mark();
2667 }
2668 2672
2669 g1_policy()->record_collection_pause_start(start_time_sec, 2673 g1_policy()->record_collection_pause_start(start_time_sec,
2670 start_used_bytes); 2674 start_used_bytes);
2671 2675
2672 guarantee(_in_cset_fast_test == NULL, "invariant"); 2676 guarantee(_in_cset_fast_test == NULL, "invariant");
2773 2777
2774 if (g1_policy()->in_young_gc_mode() && 2778 if (g1_policy()->in_young_gc_mode() &&
2775 g1_policy()->should_initiate_conc_mark()) { 2779 g1_policy()->should_initiate_conc_mark()) {
2776 concurrent_mark()->checkpointRootsInitialPost(); 2780 concurrent_mark()->checkpointRootsInitialPost();
2777 set_marking_started(); 2781 set_marking_started();
2782 // CAUTION: after the doConcurrentMark() call below,
2783 // the concurrent marking thread(s) could be running
2784 // concurrently with us. Make sure that anything after
2785 // this point does not assume that we are the only GC thread
2786 // running. Note: of course, the actual marking work will
2787 // not start until the safepoint itself is released in
2788 // ConcurrentGCThread::safepoint_desynchronize().
2778 doConcurrentMark(); 2789 doConcurrentMark();
2779 } 2790 }
2780 2791
2781 #if SCAN_ONLY_VERBOSE 2792 #if SCAN_ONLY_VERBOSE
2782 _young_list->print(); 2793 _young_list->print();
3121 3132
3122 class G1KeepAliveClosure: public OopClosure { 3133 class G1KeepAliveClosure: public OopClosure {
3123 G1CollectedHeap* _g1; 3134 G1CollectedHeap* _g1;
3124 public: 3135 public:
3125 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} 3136 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3126 void do_oop(narrowOop* p) { 3137 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3127 guarantee(false, "NYI"); 3138 void do_oop( oop* p) {
3128 }
3129 void do_oop(oop* p) {
3130 oop obj = *p; 3139 oop obj = *p;
3131 #ifdef G1_DEBUG 3140 #ifdef G1_DEBUG
3132 if (PrintGC && Verbose) { 3141 if (PrintGC && Verbose) {
3133 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, 3142 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
3134 p, (void*) obj, (void*) *p); 3143 p, (void*) obj, (void*) *p);
3136 #endif // G1_DEBUG 3145 #endif // G1_DEBUG
3137 3146
3138 if (_g1->obj_in_cs(obj)) { 3147 if (_g1->obj_in_cs(obj)) {
3139 assert( obj->is_forwarded(), "invariant" ); 3148 assert( obj->is_forwarded(), "invariant" );
3140 *p = obj->forwardee(); 3149 *p = obj->forwardee();
3141
3142 #ifdef G1_DEBUG 3150 #ifdef G1_DEBUG
3143 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, 3151 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
3144 (void*) obj, (void*) *p); 3152 (void*) obj, (void*) *p);
3145 #endif // G1_DEBUG 3153 #endif // G1_DEBUG
3146 } 3154 }
3153 G1RemSet* _g1_rem_set; 3161 G1RemSet* _g1_rem_set;
3154 public: 3162 public:
3155 UpdateRSetImmediate(G1CollectedHeap* g1) : 3163 UpdateRSetImmediate(G1CollectedHeap* g1) :
3156 _g1(g1), _g1_rem_set(g1->g1_rem_set()) {} 3164 _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
3157 3165
3158 void do_oop(narrowOop* p) { 3166 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3159 guarantee(false, "NYI"); 3167 virtual void do_oop( oop* p) { do_oop_work(p); }
3160 } 3168 template <class T> void do_oop_work(T* p) {
3161 void do_oop(oop* p) {
3162 assert(_from->is_in_reserved(p), "paranoia"); 3169 assert(_from->is_in_reserved(p), "paranoia");
3163 if (*p != NULL && !_from->is_survivor()) { 3170 T heap_oop = oopDesc::load_heap_oop(p);
3171 if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
3164 _g1_rem_set->par_write_ref(_from, p, 0); 3172 _g1_rem_set->par_write_ref(_from, p, 0);
3165 } 3173 }
3166 } 3174 }
3167 }; 3175 };
3168 3176
3174 3182
3175 public: 3183 public:
3176 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : 3184 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
3177 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} 3185 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
3178 3186
3179 void do_oop(narrowOop* p) { 3187 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3180 guarantee(false, "NYI"); 3188 virtual void do_oop( oop* p) { do_oop_work(p); }
3181 } 3189 template <class T> void do_oop_work(T* p) {
3182 void do_oop(oop* p) {
3183 assert(_from->is_in_reserved(p), "paranoia"); 3190 assert(_from->is_in_reserved(p), "paranoia");
3184 if (!_from->is_in_reserved(*p) && !_from->is_survivor()) { 3191 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
3192 !_from->is_survivor()) {
3185 size_t card_index = _ct_bs->index_for(p); 3193 size_t card_index = _ct_bs->index_for(p);
3186 if (_ct_bs->mark_card_deferred(card_index)) { 3194 if (_ct_bs->mark_card_deferred(card_index)) {
3187 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); 3195 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
3188 } 3196 }
3189 } 3197 }
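UpdateRSetDeferred above never touches the remembered set directly: for a field that now points outside its region (and is not in a survivor region), it computes the card covering the field, marks that card deferred at most once, and enqueues the card's byte address on a dirty card queue for later processing. A toy sketch of the card arithmetic and the enqueue-at-most-once behaviour, with a made-up heap, a 512-byte card size, and a plain vector standing in for DirtyCardQueue:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Made-up 1 MB heap and a one-byte-per-card table; 512-byte cards as in the
// HotSpot card table, but the sizes and values here are toy choices.
const size_t card_shift = 9;
const size_t heap_bytes = 1 << 20;
static uint8_t toy_heap[heap_bytes];
static uint8_t card_table[heap_bytes >> card_shift];
const uint8_t deferred_card = 1;

size_t index_for(const void* field_addr) {
  // Which card covers this field address?
  size_t offset = (size_t)((const uint8_t*)field_addr - toy_heap);
  return offset >> card_shift;
}

bool mark_card_deferred(size_t card_index) {
  // True only on the first transition to "deferred", so a card is enqueued
  // at most once.
  if (card_table[card_index] == deferred_card) return false;
  card_table[card_index] = deferred_card;
  return true;
}

std::vector<uint8_t*> dirty_card_queue;   // stand-in for DirtyCardQueue

void deferred_rs_update(void* field_addr) {
  size_t card_index = index_for(field_addr);
  if (mark_card_deferred(card_index)) {
    dirty_card_queue.push_back(&card_table[card_index]);
  }
}

int main() {
  deferred_rs_update(&toy_heap[1000]);   // card 1: enqueued
  deferred_rs_update(&toy_heap[1023]);   // still card 1: not enqueued again
  deferred_rs_update(&toy_heap[4096]);   // card 8: enqueued
  std::cout << "cards queued for refinement: " << dirty_card_queue.size() << "\n";   // 2
  return 0;
}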
3534 block = r->par_allocate(free_words); 3542 block = r->par_allocate(free_words);
3535 } while (block == NULL); 3543 } while (block == NULL);
3536 fill_with_object(block, free_words); 3544 fill_with_object(block, free_words);
3537 } 3545 }
3538 3546
3539 #define use_local_bitmaps 1
3540 #define verify_local_bitmaps 0
3541
3542 #ifndef PRODUCT 3547 #ifndef PRODUCT
3543
3544 class GCLabBitMap;
3545 class GCLabBitMapClosure: public BitMapClosure {
3546 private:
3547 ConcurrentMark* _cm;
3548 GCLabBitMap* _bitmap;
3549
3550 public:
3551 GCLabBitMapClosure(ConcurrentMark* cm,
3552 GCLabBitMap* bitmap) {
3553 _cm = cm;
3554 _bitmap = bitmap;
3555 }
3556
3557 virtual bool do_bit(size_t offset);
3558 };
3559
3560 #endif // PRODUCT
3561
3562 #define oop_buffer_length 256
3563
3564 class GCLabBitMap: public BitMap {
3565 private:
3566 ConcurrentMark* _cm;
3567
3568 int _shifter;
3569 size_t _bitmap_word_covers_words;
3570
3571 // beginning of the heap
3572 HeapWord* _heap_start;
3573
3574 // this is the actual start of the GCLab
3575 HeapWord* _real_start_word;
3576
3577 // this is the actual end of the GCLab
3578 HeapWord* _real_end_word;
3579
3580 // this is the first word, possibly located before the actual start
3581 // of the GCLab, that corresponds to the first bit of the bitmap
3582 HeapWord* _start_word;
3583
3584 // size of a GCLab in words
3585 size_t _gclab_word_size;
3586
3587 static int shifter() {
3588 return MinObjAlignment - 1;
3589 }
3590
3591 // how many heap words does a single bitmap word corresponds to?
3592 static size_t bitmap_word_covers_words() {
3593 return BitsPerWord << shifter();
3594 }
3595
3596 static size_t gclab_word_size() {
3597 return G1ParallelGCAllocBufferSize / HeapWordSize;
3598 }
3599
3600 static size_t bitmap_size_in_bits() {
3601 size_t bits_in_bitmap = gclab_word_size() >> shifter();
3602 // We are going to ensure that the beginning of a word in this
3603 // bitmap also corresponds to the beginning of a word in the
3604 // global marking bitmap. To handle the case where a GCLab
3605 // starts from the middle of the bitmap, we need to add enough
3606 // space (i.e. up to a bitmap word) to ensure that we have
3607 // enough bits in the bitmap.
3608 return bits_in_bitmap + BitsPerWord - 1;
3609 }
3610 public:
3611 GCLabBitMap(HeapWord* heap_start)
3612 : BitMap(bitmap_size_in_bits()),
3613 _cm(G1CollectedHeap::heap()->concurrent_mark()),
3614 _shifter(shifter()),
3615 _bitmap_word_covers_words(bitmap_word_covers_words()),
3616 _heap_start(heap_start),
3617 _gclab_word_size(gclab_word_size()),
3618 _real_start_word(NULL),
3619 _real_end_word(NULL),
3620 _start_word(NULL)
3621 {
3622 guarantee( size_in_words() >= bitmap_size_in_words(),
3623 "just making sure");
3624 }
3625
3626 inline unsigned heapWordToOffset(HeapWord* addr) {
3627 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
3628 assert(offset < size(), "offset should be within bounds");
3629 return offset;
3630 }
3631
3632 inline HeapWord* offsetToHeapWord(size_t offset) {
3633 HeapWord* addr = _start_word + (offset << _shifter);
3634 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
3635 return addr;
3636 }
3637
3638 bool fields_well_formed() {
3639 bool ret1 = (_real_start_word == NULL) &&
3640 (_real_end_word == NULL) &&
3641 (_start_word == NULL);
3642 if (ret1)
3643 return true;
3644
3645 bool ret2 = _real_start_word >= _start_word &&
3646 _start_word < _real_end_word &&
3647 (_real_start_word + _gclab_word_size) == _real_end_word &&
3648 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
3649 > _real_end_word;
3650 return ret2;
3651 }
3652
3653 inline bool mark(HeapWord* addr) {
3654 guarantee(use_local_bitmaps, "invariant");
3655 assert(fields_well_formed(), "invariant");
3656
3657 if (addr >= _real_start_word && addr < _real_end_word) {
3658 assert(!isMarked(addr), "should not have already been marked");
3659
3660 // first mark it on the bitmap
3661 at_put(heapWordToOffset(addr), true);
3662
3663 return true;
3664 } else {
3665 return false;
3666 }
3667 }
3668
3669 inline bool isMarked(HeapWord* addr) {
3670 guarantee(use_local_bitmaps, "invariant");
3671 assert(fields_well_formed(), "invariant");
3672
3673 return at(heapWordToOffset(addr));
3674 }
3675
3676 void set_buffer(HeapWord* start) {
3677 guarantee(use_local_bitmaps, "invariant");
3678 clear();
3679
3680 assert(start != NULL, "invariant");
3681 _real_start_word = start;
3682 _real_end_word = start + _gclab_word_size;
3683
3684 size_t diff =
3685 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
3686 _start_word = start - diff;
3687
3688 assert(fields_well_formed(), "invariant");
3689 }
3690
3691 #ifndef PRODUCT
3692 void verify() {
3693 // verify that the marks have been propagated
3694 GCLabBitMapClosure cl(_cm, this);
3695 iterate(&cl);
3696 }
3697 #endif // PRODUCT
3698
3699 void retire() {
3700 guarantee(use_local_bitmaps, "invariant");
3701 assert(fields_well_formed(), "invariant");
3702
3703 if (_start_word != NULL) {
3704 CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
3705
3706 // this means that the bitmap was set up for the GCLab
3707 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
3708
3709 mark_bitmap->mostly_disjoint_range_union(this,
3710 0, // always start from the start of the bitmap
3711 _start_word,
3712 size_in_words());
3713 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
3714
3715 #ifndef PRODUCT
3716 if (use_local_bitmaps && verify_local_bitmaps)
3717 verify();
3718 #endif // PRODUCT
3719 } else {
3720 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
3721 }
3722 }
3723
3724 static size_t bitmap_size_in_words() {
3725 return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
3726 }
3727 };
3728
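The GCLabBitMap just above (left-hand side only; this changeset removes it from this file) keeps a small per-buffer mark bitmap: the buffer's start is rounded down to a bitmap-word boundary relative to the heap start so that its words line up with the global marking bitmap, and the bitmap is sized with BitsPerWord - 1 bits of slack to absorb that rounding. A toy sketch of the mapping and the slack, assuming one bit per heap word and 64-bit bitmap words (the real code also folds in an object-alignment shifter):

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

// Toy model: heap "addresses" are plain word indices, one bitmap bit per
// heap word, 64 bits per bitmap word.
const size_t bits_per_bm_word = 64;

struct LabBitMap {
  size_t heap_start;   // start of the heap, in word indices
  size_t lab_words;    // size of the allocation buffer, in heap words
  size_t real_start;   // first word actually belonging to the buffer
  size_t start_word;   // heap word that bit 0 corresponds to
  std::vector<bool> bits;

  LabBitMap(size_t heap_start_, size_t lab_words_)
    : heap_start(heap_start_), lab_words(lab_words_),
      real_start(0), start_word(0),
      // bits_per_bm_word - 1 bits of slack: rounding start_word down can
      // push the last buffer word almost one bitmap word past lab_words.
      bits(lab_words_ + bits_per_bm_word - 1, false) {}

  void set_buffer(size_t start) {
    real_start = start;
    // Round down so bitmap-word boundaries here coincide with bitmap-word
    // boundaries of a global bitmap that starts at heap_start; the later
    // union into the global bitmap can then proceed word by word.
    size_t diff = (start - heap_start) % bits_per_bm_word;
    start_word = start - diff;
  }

  void mark(size_t addr) {
    assert(addr >= real_start && addr < real_start + lab_words);
    size_t offset = addr - start_word;
    assert(offset < bits.size());   // the slack guarantees this always fits
    bits[offset] = true;
  }
};

int main() {
  LabBitMap bm(/*heap_start=*/1000, /*lab_words=*/256);
  bm.set_buffer(1070);              // 70 words past heap_start: unaligned
  bm.mark(1070);                    // first buffer word -> bit 6
  bm.mark(1070 + 255);              // last buffer word -> bit 261, needs slack
  std::cout << "bit 0 covers heap word " << bm.start_word << "\n";   // 1064
  return 0;
}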
3729 #ifndef PRODUCT
3730
3731 bool GCLabBitMapClosure::do_bit(size_t offset) { 3548 bool GCLabBitMapClosure::do_bit(size_t offset) {
3732 HeapWord* addr = _bitmap->offsetToHeapWord(offset); 3549 HeapWord* addr = _bitmap->offsetToHeapWord(offset);
3733 guarantee(_cm->isMarked(oop(addr)), "it should be!"); 3550 guarantee(_cm->isMarked(oop(addr)), "it should be!");
3734 return true; 3551 return true;
3735 } 3552 }
3736
3737 #endif // PRODUCT 3553 #endif // PRODUCT
3738 3554
3739 class G1ParGCAllocBuffer: public ParGCAllocBuffer { 3555 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
3740 private: 3556 : _g1h(g1h),
3741 bool _retired; 3557 _refs(g1h->task_queue(queue_num)),
3742 bool _during_marking; 3558 _dcq(&g1h->dirty_card_queue_set()),
3743 GCLabBitMap _bitmap; 3559 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
3744 3560 _g1_rem(g1h->g1_rem_set()),
3745 public: 3561 _hash_seed(17), _queue_num(queue_num),
3746 G1ParGCAllocBuffer() : 3562 _term_attempts(0),
3747 ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize), 3563 _age_table(false),
3748 _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
3749 _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
3750 _retired(false)
3751 { }
3752
3753 inline bool mark(HeapWord* addr) {
3754 guarantee(use_local_bitmaps, "invariant");
3755 assert(_during_marking, "invariant");
3756 return _bitmap.mark(addr);
3757 }
3758
3759 inline void set_buf(HeapWord* buf) {
3760 if (use_local_bitmaps && _during_marking)
3761 _bitmap.set_buffer(buf);
3762 ParGCAllocBuffer::set_buf(buf);
3763 _retired = false;
3764 }
3765
3766 inline void retire(bool end_of_gc, bool retain) {
3767 if (_retired)
3768 return;
3769 if (use_local_bitmaps && _during_marking) {
3770 _bitmap.retire();
3771 }
3772 ParGCAllocBuffer::retire(end_of_gc, retain);
3773 _retired = true;
3774 }
3775 };
3776
3777
3778 class G1ParScanThreadState : public StackObj {
3779 protected:
3780 G1CollectedHeap* _g1h;
3781 RefToScanQueue* _refs;
3782 DirtyCardQueue _dcq;
3783 CardTableModRefBS* _ct_bs;
3784 G1RemSet* _g1_rem;
3785
3786 typedef GrowableArray<oop*> OverflowQueue;
3787 OverflowQueue* _overflowed_refs;
3788
3789 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
3790 ageTable _age_table;
3791
3792 size_t _alloc_buffer_waste;
3793 size_t _undo_waste;
3794
3795 OopsInHeapRegionClosure* _evac_failure_cl;
3796 G1ParScanHeapEvacClosure* _evac_cl;
3797 G1ParScanPartialArrayClosure* _partial_scan_cl;
3798
3799 int _hash_seed;
3800 int _queue_num;
3801
3802 int _term_attempts;
3803 #if G1_DETAILED_STATS 3564 #if G1_DETAILED_STATS
3804 int _pushes, _pops, _steals, _steal_attempts; 3565 _pushes(0), _pops(0), _steals(0),
3805 int _overflow_pushes; 3566 _steal_attempts(0), _overflow_pushes(0),
3806 #endif 3567 #endif
3807 3568 _strong_roots_time(0), _term_time(0),
3808 double _start; 3569 _alloc_buffer_waste(0), _undo_waste(0)
3809 double _start_strong_roots; 3570 {
3810 double _strong_roots_time; 3571 // we allocate G1YoungSurvRateNumRegions plus one entries, since
3811 double _start_term; 3572 // we "sacrifice" entry 0 to keep track of surviving bytes for
3812 double _term_time; 3573 // non-young regions (where the age is -1)
3813 3574 // We also add a few elements at the beginning and at the end in
3814 // Map from young-age-index (0 == not young, 1 is youngest) to 3575 // an attempt to eliminate cache contention
3815 // surviving words. base is what we get back from the malloc call 3576 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
3816 size_t* _surviving_young_words_base; 3577 size_t array_length = PADDING_ELEM_NUM +
3817 // this points into the array, as we use the first few entries for padding 3578 real_length +
3818 size_t* _surviving_young_words; 3579 PADDING_ELEM_NUM;
3819 3580 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
3820 #define PADDING_ELEM_NUM (64 / sizeof(size_t)) 3581 if (_surviving_young_words_base == NULL)
3821 3582 vm_exit_out_of_memory(array_length * sizeof(size_t),
3822 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } 3583 "Not enough space for young surv histo.");
3823 3584 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
3824 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } 3585 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
3825 3586
3826 DirtyCardQueue& dirty_card_queue() { return _dcq; } 3587 _overflowed_refs = new OverflowQueue(10);
3827 CardTableModRefBS* ctbs() { return _ct_bs; } 3588
3828 3589 _start = os::elapsedTime();
3829 void immediate_rs_update(HeapRegion* from, oop* p, int tid) { 3590 }
3830 if (!from->is_survivor()) {
3831 _g1_rem->par_write_ref(from, p, tid);
3832 }
3833 }
3834
3835 void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
3836 // If the new value of the field points to the same region or
3837 // is the to-space, we don't need to include it in the Rset updates.
3838 if (!from->is_in_reserved(*p) && !from->is_survivor()) {
3839 size_t card_index = ctbs()->index_for(p);
3840 // If the card hasn't been added to the buffer, do it.
3841 if (ctbs()->mark_card_deferred(card_index)) {
3842 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
3843 }
3844 }
3845 }
3846
3847 public:
3848 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
3849 : _g1h(g1h),
3850 _refs(g1h->task_queue(queue_num)),
3851 _dcq(&g1h->dirty_card_queue_set()),
3852 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
3853 _g1_rem(g1h->g1_rem_set()),
3854 _hash_seed(17), _queue_num(queue_num),
3855 _term_attempts(0),
3856 _age_table(false),
3857 #if G1_DETAILED_STATS
3858 _pushes(0), _pops(0), _steals(0),
3859 _steal_attempts(0), _overflow_pushes(0),
3860 #endif
3861 _strong_roots_time(0), _term_time(0),
3862 _alloc_buffer_waste(0), _undo_waste(0)
3863 {
3864 // we allocate G1YoungSurvRateNumRegions plus one entries, since
3865 // we "sacrifice" entry 0 to keep track of surviving bytes for
3866 // non-young regions (where the age is -1)
3867 // We also add a few elements at the beginning and at the end in
3868 // an attempt to eliminate cache contention
3869 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
3870 size_t array_length = PADDING_ELEM_NUM +
3871 real_length +
3872 PADDING_ELEM_NUM;
3873 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
3874 if (_surviving_young_words_base == NULL)
3875 vm_exit_out_of_memory(array_length * sizeof(size_t),
3876 "Not enough space for young surv histo.");
3877 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
3878 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
3879
3880 _overflowed_refs = new OverflowQueue(10);
3881
3882 _start = os::elapsedTime();
3883 }
3884
3885 ~G1ParScanThreadState() {
3886 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
3887 }
3888
3889 RefToScanQueue* refs() { return _refs; }
3890 OverflowQueue* overflowed_refs() { return _overflowed_refs; }
3891 ageTable* age_table() { return &_age_table; }
3892
3893 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
3894 return &_alloc_buffers[purpose];
3895 }
3896
3897 size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
3898 size_t undo_waste() { return _undo_waste; }
3899
3900 void push_on_queue(oop* ref) {
3901 assert(ref != NULL, "invariant");
3902 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
3903
3904 if (!refs()->push(ref)) {
3905 overflowed_refs()->push(ref);
3906 IF_G1_DETAILED_STATS(note_overflow_push());
3907 } else {
3908 IF_G1_DETAILED_STATS(note_push());
3909 }
3910 }
3911
3912 void pop_from_queue(oop*& ref) {
3913 if (!refs()->pop_local(ref)) {
3914 ref = NULL;
3915 } else {
3916 assert(ref != NULL, "invariant");
3917 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
3918 "invariant");
3919
3920 IF_G1_DETAILED_STATS(note_pop());
3921 }
3922 }
3923
3924 void pop_from_overflow_queue(oop*& ref) {
3925 ref = overflowed_refs()->pop();
3926 }
3927
3928 int refs_to_scan() { return refs()->size(); }
3929 int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
3930
3931 void update_rs(HeapRegion* from, oop* p, int tid) {
3932 if (G1DeferredRSUpdate) {
3933 deferred_rs_update(from, p, tid);
3934 } else {
3935 immediate_rs_update(from, p, tid);
3936 }
3937 }
3938
3939 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
3940
3941 HeapWord* obj = NULL;
3942 if (word_sz * 100 <
3943 (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
3944 ParallelGCBufferWastePct) {
3945 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
3946 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
3947 alloc_buf->retire(false, false);
3948
3949 HeapWord* buf =
3950 _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
3951 if (buf == NULL) return NULL; // Let caller handle allocation failure.
3952 // Otherwise.
3953 alloc_buf->set_buf(buf);
3954
3955 obj = alloc_buf->allocate(word_sz);
3956 assert(obj != NULL, "buffer was definitely big enough...");
3957 } else {
3958 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
3959 }
3960 return obj;
3961 }
3962
3963 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
3964 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
3965 if (obj != NULL) return obj;
3966 return allocate_slow(purpose, word_sz);
3967 }
3968
3969 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
3970 if (alloc_buffer(purpose)->contains(obj)) {
3971 guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
3972 "should contain whole object");
3973 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
3974 } else {
3975 CollectedHeap::fill_with_object(obj, word_sz);
3976 add_to_undo_waste(word_sz);
3977 }
3978 }
3979
3980 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
3981 _evac_failure_cl = evac_failure_cl;
3982 }
3983 OopsInHeapRegionClosure* evac_failure_closure() {
3984 return _evac_failure_cl;
3985 }
3986
3987 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
3988 _evac_cl = evac_cl;
3989 }
3990
3991 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
3992 _partial_scan_cl = partial_scan_cl;
3993 }
3994
3995 int* hash_seed() { return &_hash_seed; }
3996 int queue_num() { return _queue_num; }
3997
3998 int term_attempts() { return _term_attempts; }
3999 void note_term_attempt() { _term_attempts++; }
4000
4001 #if G1_DETAILED_STATS
4002 int pushes() { return _pushes; }
4003 int pops() { return _pops; }
4004 int steals() { return _steals; }
4005 int steal_attempts() { return _steal_attempts; }
4006 int overflow_pushes() { return _overflow_pushes; }
4007
4008 void note_push() { _pushes++; }
4009 void note_pop() { _pops++; }
4010 void note_steal() { _steals++; }
4011 void note_steal_attempt() { _steal_attempts++; }
4012 void note_overflow_push() { _overflow_pushes++; }
4013 #endif
4014
4015 void start_strong_roots() {
4016 _start_strong_roots = os::elapsedTime();
4017 }
4018 void end_strong_roots() {
4019 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
4020 }
4021 double strong_roots_time() { return _strong_roots_time; }
4022
4023 void start_term_time() {
4024 note_term_attempt();
4025 _start_term = os::elapsedTime();
4026 }
4027 void end_term_time() {
4028 _term_time += (os::elapsedTime() - _start_term);
4029 }
4030 double term_time() { return _term_time; }
4031
4032 double elapsed() {
4033 return os::elapsedTime() - _start;
4034 }
4035
4036 size_t* surviving_young_words() {
4037 // We add on to hide entry 0 which accumulates surviving words for
4038 // age -1 regions (i.e. non-young ones)
4039 return _surviving_young_words;
4040 }
4041
4042 void retire_alloc_buffers() {
4043 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
4044 size_t waste = _alloc_buffers[ap].words_remaining();
4045 add_to_alloc_buffer_waste(waste);
4046 _alloc_buffers[ap].retire(true, false);
4047 }
4048 }
4049
4050 private:
4051 void deal_with_reference(oop* ref_to_scan) {
4052 if (has_partial_array_mask(ref_to_scan)) {
4053 _partial_scan_cl->do_oop_nv(ref_to_scan);
4054 } else {
4055 // Note: we can use "raw" versions of "region_containing" because
4056 // "obj_to_scan" is definitely in the heap, and is not in a
4057 // humongous region.
4058 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
4059 _evac_cl->set_region(r);
4060 _evac_cl->do_oop_nv(ref_to_scan);
4061 }
4062 }
4063
4064 public:
4065 void trim_queue() {
4066 // I've replicated the loop twice, first to drain the overflow
4067 // queue, second to drain the task queue. This is better than
4068 // having a single loop, which checks both conditions and, inside
4069 // it, either pops the overflow queue or the task queue, as each
4070 // loop is tighter. Also, the decision to drain the overflow queue
4071 // first is not arbitrary, as the overflow queue is not visible
4072 // to the other workers, whereas the task queue is. So, we want to
4073 // drain the "invisible" entries first, while allowing the other
4074 // workers to potentially steal the "visible" entries.
4075
4076 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
4077 while (overflowed_refs_to_scan() > 0) {
4078 oop *ref_to_scan = NULL;
4079 pop_from_overflow_queue(ref_to_scan);
4080 assert(ref_to_scan != NULL, "invariant");
4081 // We shouldn't have pushed it on the queue if it was not
4082 // pointing into the CSet.
4083 assert(ref_to_scan != NULL, "sanity");
4084 assert(has_partial_array_mask(ref_to_scan) ||
4085 _g1h->obj_in_cs(*ref_to_scan), "sanity");
4086
4087 deal_with_reference(ref_to_scan);
4088 }
4089
4090 while (refs_to_scan() > 0) {
4091 oop *ref_to_scan = NULL;
4092 pop_from_queue(ref_to_scan);
4093
4094 if (ref_to_scan != NULL) {
4095 // We shouldn't have pushed it on the queue if it was not
4096 // pointing into the CSet.
4097 assert(has_partial_array_mask(ref_to_scan) ||
4098 _g1h->obj_in_cs(*ref_to_scan), "sanity");
4099
4100 deal_with_reference(ref_to_scan);
4101 }
4102 }
4103 }
4104 }
4105 };
4106 3591
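Both versions of the G1ParScanThreadState constructor allocate the surviving-young-words array with PADDING_ELEM_NUM (one 64-byte cache line's worth of size_t slots) of slack at each end, in an attempt to keep a worker's hot counters off cache lines shared with neighbouring allocations. A small sketch of that padding arithmetic, assuming a 64-byte cache line; the SurvivingWords wrapper and the entry counts are invented for the example (the real code uses a raw NEW_C_HEAP_ARRAY allocation):

#include <cstddef>
#include <cstring>
#include <iostream>

const size_t cache_line_bytes = 64;
const size_t padding_elem_num = cache_line_bytes / sizeof(size_t);   // as in PADDING_ELEM_NUM

// Invented wrapper; stands in for the raw base/offset pointers in the diff.
struct SurvivingWords {
  size_t* base;      // what is actually allocated and freed
  size_t* words;     // what the worker indexes; starts one cache line in
  size_t  length;    // usable entries

  explicit SurvivingWords(size_t real_length) : length(real_length) {
    // Slack at both ends keeps this worker's hot counters off cache lines
    // that a neighbouring allocation might also touch.
    size_t array_length = padding_elem_num + real_length + padding_elem_num;
    base  = new size_t[array_length];
    words = base + padding_elem_num;
    std::memset(words, 0, real_length * sizeof(size_t));
  }
  ~SurvivingWords() { delete[] base; }
};

int main() {
  SurvivingWords s(1 + 32);   // entry 0 is "sacrificed" for non-young regions
  s.words[0] += 128;          // bytes surviving from non-young regions
  s.words[5] += 64;           // bytes surviving from the 5th-youngest region
  std::cout << "padding entries per side: " << padding_elem_num << "\n";
  return 0;
}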
4107 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : 3592 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
4108 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), 3593 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4109 _par_scan_state(par_scan_state) { } 3594 _par_scan_state(par_scan_state) { }
4110 3595
4111 // This closure is applied to the fields of the objects that have just been copied. 3596 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
4112 // Should probably be made inline and moved in g1OopClosures.inline.hpp.
4113 void G1ParScanClosure::do_oop_nv(oop* p) {
4114 oop obj = *p;
4115
4116 if (obj != NULL) {
4117 if (_g1->in_cset_fast_test(obj)) {
4118 // We're not going to even bother checking whether the object is
4119 // already forwarded or not, as this usually causes an immediate
4120 // stall. We'll try to prefetch the object (for write, given that
4121 // we might need to install the forwarding reference) and we'll
4122 // get back to it when pop it from the queue
4123 Prefetch::write(obj->mark_addr(), 0);
4124 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
4125
4126 // slightly paranoid test; I'm trying to catch potential
4127 // problems before we go into push_on_queue to know where the
4128 // problem is coming from
4129 assert(obj == *p, "the value of *p should not have changed");
4130 _par_scan_state->push_on_queue(p);
4131 } else {
4132 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
4133 }
4134 }
4135 }
4136
4137 void G1ParCopyHelper::mark_forwardee(oop* p) {
4138 // This is called _after_ do_oop_work has been called, hence after 3597 // This is called _after_ do_oop_work has been called, hence after
4139 // the object has been relocated to its new location and *p points 3598 // the object has been relocated to its new location and *p points
4140 // to its new location. 3599 // to its new location.
4141 3600
4142 oop thisOop = *p; 3601 T heap_oop = oopDesc::load_heap_oop(p);
4143 if (thisOop != NULL) { 3602 if (!oopDesc::is_null(heap_oop)) {
4144 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)), 3603 oop obj = oopDesc::decode_heap_oop(heap_oop);
3604 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
4145 "shouldn't still be in the CSet if evacuation didn't fail."); 3605 "shouldn't still be in the CSet if evacuation didn't fail.");
4146 HeapWord* addr = (HeapWord*)thisOop; 3606 HeapWord* addr = (HeapWord*)obj;
4147 if (_g1->is_in_g1_reserved(addr)) 3607 if (_g1->is_in_g1_reserved(addr))
4148 _cm->grayRoot(oop(addr)); 3608 _cm->grayRoot(oop(addr));
4149 } 3609 }
4150 } 3610 }
4151 3611
4224 size_t* surv_young_words = _par_scan_state->surviving_young_words(); 3684 size_t* surv_young_words = _par_scan_state->surviving_young_words();
4225 surv_young_words[young_index] += word_sz; 3685 surv_young_words[young_index] += word_sz;
4226 3686
4227 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { 3687 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4228 arrayOop(old)->set_length(0); 3688 arrayOop(old)->set_length(0);
4229 _par_scan_state->push_on_queue(set_partial_array_mask(old)); 3689 oop* old_p = set_partial_array_mask(old);
3690 _par_scan_state->push_on_queue(old_p);
4230 } else { 3691 } else {
4231 // No point in using the slower heap_region_containing() method, 3692 // No point in using the slower heap_region_containing() method,
4232 // given that we know obj is in the heap. 3693 // given that we know obj is in the heap.
4233 _scanner->set_region(_g1->heap_region_containing_raw(obj)); 3694 _scanner->set_region(_g1->heap_region_containing_raw(obj));
4234 obj->oop_iterate_backwards(_scanner); 3695 obj->oop_iterate_backwards(_scanner);
4238 obj = forward_ptr; 3699 obj = forward_ptr;
4239 } 3700 }
4240 return obj; 3701 return obj;
4241 } 3702 }
4242 3703
4243 template<bool do_gen_barrier, G1Barrier barrier, 3704 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
4244 bool do_mark_forwardee, bool skip_cset_test> 3705 template <class T>
4245 void G1ParCopyClosure<do_gen_barrier, barrier, 3706 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>
4246 do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) { 3707 ::do_oop_work(T* p) {
4247 oop obj = *p; 3708 oop obj = oopDesc::load_decode_heap_oop(p);
4248 assert(barrier != G1BarrierRS || obj != NULL, 3709 assert(barrier != G1BarrierRS || obj != NULL,
4249 "Precondition: G1BarrierRS implies obj is nonNull"); 3710 "Precondition: G1BarrierRS implies obj is nonNull");
4250 3711
4251 // The only time we skip the cset test is when we're scanning 3712 // The only time we skip the cset test is when we're scanning
4252 // references popped from the queue. And we only push on the queue 3713 // references popped from the queue. And we only push on the queue
4259 #if G1_REM_SET_LOGGING 3720 #if G1_REM_SET_LOGGING
4260 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " 3721 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
4261 "into CS.", p, (void*) obj); 3722 "into CS.", p, (void*) obj);
4262 #endif 3723 #endif
4263 if (obj->is_forwarded()) { 3724 if (obj->is_forwarded()) {
4264 *p = obj->forwardee(); 3725 oopDesc::encode_store_heap_oop(p, obj->forwardee());
4265 } else { 3726 } else {
4266 *p = copy_to_survivor_space(obj); 3727 oop copy_oop = copy_to_survivor_space(obj);
3728 oopDesc::encode_store_heap_oop(p, copy_oop);
4267 } 3729 }
4268 // When scanning the RS, we only care about objs in CS. 3730 // When scanning the RS, we only care about objs in CS.
4269 if (barrier == G1BarrierRS) { 3731 if (barrier == G1BarrierRS) {
4270 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); 3732 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
4271 } 3733 }
4280 par_do_barrier(p); 3742 par_do_barrier(p);
4281 } 3743 }
4282 } 3744 }
4283 3745
4284 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p); 3746 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
4285 3747 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p);
4286 template<class T> void G1ParScanPartialArrayClosure::process_array_chunk( 3748
4287 oop obj, int start, int end) { 3749 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4288 // process our set of indices (include header in first chunk)
4289 assert(start < end, "invariant");
4290 T* const base = (T*)objArrayOop(obj)->base();
4291 T* const start_addr = (start == 0) ? (T*) obj : base + start;
4292 T* const end_addr = base + end;
4293 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
4294 _scanner.set_region(_g1->heap_region_containing(obj));
4295 obj->oop_iterate(&_scanner, mr);
4296 }
4297
4298 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
4299 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
4300 assert(has_partial_array_mask(p), "invariant"); 3750 assert(has_partial_array_mask(p), "invariant");
4301 oop old = clear_partial_array_mask(p); 3751 oop old = clear_partial_array_mask(p);
4302 assert(old->is_objArray(), "must be obj array"); 3752 assert(old->is_objArray(), "must be obj array");
4303 assert(old->is_forwarded(), "must be forwarded"); 3753 assert(old->is_forwarded(), "must be forwarded");
4304 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); 3754 assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
4314 if (remainder > 2 * ParGCArrayScanChunk) { 3764 if (remainder > 2 * ParGCArrayScanChunk) {
4315 // Test above combines last partial chunk with a full chunk 3765 // Test above combines last partial chunk with a full chunk
4316 end = start + ParGCArrayScanChunk; 3766 end = start + ParGCArrayScanChunk;
4317 arrayOop(old)->set_length(end); 3767 arrayOop(old)->set_length(end);
4318 // Push remainder. 3768 // Push remainder.
4319 _par_scan_state->push_on_queue(set_partial_array_mask(old)); 3769 oop* old_p = set_partial_array_mask(old);
3770 assert(arrayOop(old)->length() < obj->length(), "Empty push?");
3771 _par_scan_state->push_on_queue(old_p);
4320 } else { 3772 } else {
4321 // Restore length so that the heap remains parsable in 3773 // Restore length so that the heap remains parsable in
4322 // case of evacuation failure. 3774 // case of evacuation failure.
4323 arrayOop(old)->set_length(end); 3775 arrayOop(old)->set_length(end);
4324 } 3776 }
4325 3777 _scanner.set_region(_g1->heap_region_containing_raw(obj));
4326 // process our set of indices (include header in first chunk) 3778 // process our set of indices (include header in first chunk)
4327 process_array_chunk<oop>(obj, start, end); 3779 obj->oop_iterate_range(&_scanner, start, end);
4328 } 3780 }
4329
4330 int G1ScanAndBalanceClosure::_nq = 0;
4331 3781
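G1ParScanPartialArrayClosure::do_oop_nv above scans a big object array in chunks: the array's length field temporarily records how far scanning has progressed, each step covers at most ParGCArrayScanChunk further elements (a short tail is merged into the final chunk rather than re-queued), and if work remains the masked array pointer goes back on the queue; that masked-pointer trick is also why the old oop*-only version asserted !UseCompressedOops and the new one is templated. A simplified standalone sketch of just the chunking control flow, using an invented task struct and a plain deque instead of the masked-pointer encoding and the GC task queues:

#include <cstddef>
#include <deque>
#include <iostream>
#include <vector>

const int scan_chunk = 50;   // invented stand-in for ParGCArrayScanChunk

// A task records the array plus how many elements have been scanned so far
// (the real code stores this progress in the array's length field).
struct ArrayTask { std::vector<int>* array; int scanned; };

void scan_range(std::vector<int>& a, int start, int end) {
  // Stand-in for obj->oop_iterate_range(&_scanner, start, end).
  (void)a;
  std::cout << "scanning elements [" << start << ", " << end << ")\n";
}

void process_chunk(ArrayTask task, std::deque<ArrayTask>& queue) {
  int start = task.scanned;
  int length = (int)task.array->size();
  int remainder = length - start;
  int end;
  if (remainder > 2 * scan_chunk) {
    // Take one chunk now and push the rest back for later (possibly stolen) work.
    end = start + scan_chunk;
    queue.push_back(ArrayTask{task.array, end});
  } else {
    // Final step: merge a short tail into this chunk instead of re-queueing it.
    end = length;
  }
  scan_range(*task.array, start, end);
}

int main() {
  std::vector<int> big(180);
  std::deque<ArrayTask> queue;
  queue.push_back(ArrayTask{&big, 0});
  while (!queue.empty()) {
    ArrayTask t = queue.front();
    queue.pop_front();
    process_chunk(t, queue);          // scans [0,50), [50,100), then [100,180)
  }
  return 0;
}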
4332 class G1ParEvacuateFollowersClosure : public VoidClosure { 3782 class G1ParEvacuateFollowersClosure : public VoidClosure {
4333 protected: 3783 protected:
4334 G1CollectedHeap* _g1h; 3784 G1CollectedHeap* _g1h;
4335 G1ParScanThreadState* _par_scan_state; 3785 G1ParScanThreadState* _par_scan_state;
4349 _queues(queues), _terminator(terminator) {} 3799 _queues(queues), _terminator(terminator) {}
4350 3800
4351 void do_void() { 3801 void do_void() {
4352 G1ParScanThreadState* pss = par_scan_state(); 3802 G1ParScanThreadState* pss = par_scan_state();
4353 while (true) { 3803 while (true) {
4354 oop* ref_to_scan;
4355 pss->trim_queue(); 3804 pss->trim_queue();
4356 IF_G1_DETAILED_STATS(pss->note_steal_attempt()); 3805 IF_G1_DETAILED_STATS(pss->note_steal_attempt());
4357 if (queues()->steal(pss->queue_num(), 3806
4358 pss->hash_seed(), 3807 StarTask stolen_task;
4359 ref_to_scan)) { 3808 if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
4360 IF_G1_DETAILED_STATS(pss->note_steal()); 3809 IF_G1_DETAILED_STATS(pss->note_steal());
4361 3810
4362 // slightly paranoid tests; I'm trying to catch potential 3811 // slightly paranoid tests; I'm trying to catch potential
4363 // problems before we go into push_on_queue to know where the 3812 // problems before we go into push_on_queue to know where the
4364 // problem is coming from 3813 // problem is coming from
4365 assert(ref_to_scan != NULL, "invariant"); 3814 assert((oop*)stolen_task != NULL, "Error");
4366 assert(has_partial_array_mask(ref_to_scan) || 3815 if (stolen_task.is_narrow()) {
4367 _g1h->obj_in_cs(*ref_to_scan), "invariant"); 3816 assert(UseCompressedOops, "Error");
4368 pss->push_on_queue(ref_to_scan); 3817 narrowOop* p = (narrowOop*) stolen_task;
3818 assert(has_partial_array_mask(p) ||
3819 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error");
3820 pss->push_on_queue(p);
3821 } else {
3822 oop* p = (oop*) stolen_task;
3823 assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error");
3824 pss->push_on_queue(p);
3825 }
4369 continue; 3826 continue;
4370 } 3827 }
4371 pss->start_term_time(); 3828 pss->start_term_time();
4372 if (terminator()->offer_termination()) break; 3829 if (terminator()->offer_termination()) break;
4373 pss->end_term_time(); 3830 pss->end_term_time();
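The do_void loop above is the usual work-stealing evacuation pattern: drain the local queue (overflow entries first, since only the owning worker can see them, per the trim_queue comment earlier in this hunk), then try to steal from another worker's queue, and only offer termination when both come up empty; the new code steals a StarTask so the stolen slot may be either an oop* or a narrowOop*. A much-simplified sketch of that loop structure, assuming mutex-protected deques and a crude pending-task counter in place of HotSpot's lock-free task queues and ParallelTaskTerminator:

#include <atomic>
#include <cstddef>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

const int n_workers = 4;

// One mutex-protected deque per worker; the real task queues are lock-free
// and the "tasks" would be StarTask slots, not ints.
struct WorkerQueue {
  std::mutex m;
  std::deque<int> q;
  bool pop(int& out) {              // owner pops from its own end
    std::lock_guard<std::mutex> l(m);
    if (q.empty()) return false;
    out = q.back(); q.pop_back();
    return true;
  }
  bool steal(int& out) {            // thieves take from the opposite end
    std::lock_guard<std::mutex> l(m);
    if (q.empty()) return false;
    out = q.front(); q.pop_front();
    return true;
  }
  void push(int v) { std::lock_guard<std::mutex> l(m); q.push_back(v); }
};

std::vector<WorkerQueue> queues(n_workers);
std::atomic<int> pending{0};        // crude stand-in for ParallelTaskTerminator
std::atomic<long> processed{0};

void spawn(int worker, int task) { pending++; queues[worker].push(task); }

void process(int worker, int task) {
  processed++;
  if (task > 0) spawn(worker, task - 1);   // scanning a task may create more work
  pending--;                               // decrement only after children are visible
}

void evacuate_followers(int worker) {
  int task;
  while (pending.load() > 0) {             // "offer termination" when nothing is pending
    while (queues[worker].pop(task)) process(worker, task);   // drain local queue first
    for (int v = 0; v < n_workers; ++v) {  // then try to steal one task
      if (v != worker && queues[v].steal(task)) {
        process(worker, task);
        break;
      }
    }
  }
}

int main() {
  for (int w = 0; w < n_workers; ++w) spawn(w, 100);   // seed each worker
  std::vector<std::thread> workers;
  for (int w = 0; w < n_workers; ++w) workers.emplace_back(evacuate_followers, w);
  for (std::thread& t : workers) t.join();
  std::cout << "tasks processed: " << processed.load() << "\n";   // n_workers * 101
  return 0;
}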
4380 class G1ParTask : public AbstractGangTask { 3837 class G1ParTask : public AbstractGangTask {
4381 protected: 3838 protected:
4382 G1CollectedHeap* _g1h; 3839 G1CollectedHeap* _g1h;
4383 RefToScanQueueSet *_queues; 3840 RefToScanQueueSet *_queues;
4384 ParallelTaskTerminator _terminator; 3841 ParallelTaskTerminator _terminator;
3842 int _n_workers;
4385 3843
4386 Mutex _stats_lock; 3844 Mutex _stats_lock;
4387 Mutex* stats_lock() { return &_stats_lock; } 3845 Mutex* stats_lock() { return &_stats_lock; }
4388 3846
4389 size_t getNCards() { 3847 size_t getNCards() {
4395 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) 3853 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
4396 : AbstractGangTask("G1 collection"), 3854 : AbstractGangTask("G1 collection"),
4397 _g1h(g1h), 3855 _g1h(g1h),
4398 _queues(task_queues), 3856 _queues(task_queues),
4399 _terminator(workers, _queues), 3857 _terminator(workers, _queues),
4400 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true) 3858 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
3859 _n_workers(workers)
4401 {} 3860 {}
4402 3861
4403 RefToScanQueueSet* queues() { return _queues; } 3862 RefToScanQueueSet* queues() { return _queues; }
4404 3863
4405 RefToScanQueue *work_queue(int i) { 3864 RefToScanQueue *work_queue(int i) {
4406 return queues()->queue(i); 3865 return queues()->queue(i);
4407 } 3866 }
4408 3867
4409 void work(int i) { 3868 void work(int i) {
3869 if (i >= _n_workers) return; // no work needed this round
4410 ResourceMark rm; 3870 ResourceMark rm;
4411 HandleMark hm; 3871 HandleMark hm;
4412 3872
4413 G1ParScanThreadState pss(_g1h, i); 3873 G1ParScanThreadState pss(_g1h, i);
4414 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); 3874 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
4501 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); 3961 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
4502 } 3962 }
4503 }; 3963 };
4504 3964
4505 // *** Common G1 Evacuation Stuff 3965 // *** Common G1 Evacuation Stuff
4506
4507 class G1CountClosure: public OopsInHeapRegionClosure {
4508 public:
4509 int n;
4510 G1CountClosure() : n(0) {}
4511 void do_oop(narrowOop* p) {
4512 guarantee(false, "NYI");
4513 }
4514 void do_oop(oop* p) {
4515 oop obj = *p;
4516 assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj),
4517 "Rem set closure called on non-rem-set pointer.");
4518 n++;
4519 }
4520 };
4521
4522 static G1CountClosure count_closure;
4523 3966
4524 void 3967 void
4525 G1CollectedHeap:: 3968 G1CollectedHeap::
4526 g1_process_strong_roots(bool collecting_perm_gen, 3969 g1_process_strong_roots(bool collecting_perm_gen,
4527 SharedHeap::ScanningOption so, 3970 SharedHeap::ScanningOption so,
5568 #endif // PRODUCT 5011 #endif // PRODUCT
5569 5012
5570 void G1CollectedHeap::g1_unimplemented() { 5013 void G1CollectedHeap::g1_unimplemented() {
5571 // Unimplemented(); 5014 // Unimplemented();
5572 } 5015 }
5573
5574
5575 // Local Variables: ***
5576 // c-indentation-style: gnu ***
5577 // End: ***