comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 6010:720b6a76dd9d

7157073: G1: type change size_t -> uint for region counts / indexes
Summary: Change the type of fields / variables / etc. that represent region counts and indexes from size_t to uint.
Reviewed-by: iveresov, brutisso, jmasa, jwilhelm
author tonyp
date Wed, 18 Apr 2012 07:21:15 -0400
parents b632e80fc9dc
children f7a8920427a6
comparing 6009:dde53abda3d6 (left) with 6010:720b6a76dd9d (right)
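The whole changeset applies one mechanical pattern, sketched below with invented names (an illustration, not the HotSpot sources): fields that hold region counts narrow from size_t to uint, their log format specifiers become %u, and any product with a byte or word size is explicitly widened back to size_t.

    #include <cstdio>
    #include <cstddef>

    typedef unsigned int uint;

    struct RegionCountsSketch {
      uint _length;                            // was: size_t _length

      void print(size_t grain_bytes) const {
        // Widen before multiplying so the byte total stays a size_t.
        size_t total_bytes = (size_t) _length * grain_bytes;
        std::printf("%u regions, %zu bytes\n", _length, total_bytes);
      }
    };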
232 } 232 }
233 233
234 bool YoungList::check_list_well_formed() { 234 bool YoungList::check_list_well_formed() {
235 bool ret = true; 235 bool ret = true;
236 236
237 size_t length = 0; 237 uint length = 0;
238 HeapRegion* curr = _head; 238 HeapRegion* curr = _head;
239 HeapRegion* last = NULL; 239 HeapRegion* last = NULL;
240 while (curr != NULL) { 240 while (curr != NULL) {
241 if (!curr->is_young()) { 241 if (!curr->is_young()) {
242 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " 242 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
251 } 251 }
252 ret = ret && (length == _length); 252 ret = ret && (length == _length);
253 253
254 if (!ret) { 254 if (!ret) {
255 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); 255 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
256 gclog_or_tty->print_cr("### list has %d entries, _length is %d", 256 gclog_or_tty->print_cr("### list has %u entries, _length is %u",
257 length, _length); 257 length, _length);
258 } 258 }
259 259
260 return ret; 260 return ret;
261 } 261 }
262 262
263 bool YoungList::check_list_empty(bool check_sample) { 263 bool YoungList::check_list_empty(bool check_sample) {
264 bool ret = true; 264 bool ret = true;
265 265
266 if (_length != 0) { 266 if (_length != 0) {
267 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", 267 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
268 _length); 268 _length);
269 ret = false; 269 ret = false;
270 } 270 }
271 if (check_sample && _last_sampled_rs_lengths != 0) { 271 if (check_sample && _last_sampled_rs_lengths != 0) {
272 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); 272 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
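The hunk above changes YoungList::check_list_well_formed() and check_list_empty() to count with a uint and print with %u. A minimal sketch of the same walk, with made-up node and list types:

    #include <cstddef>

    typedef unsigned int uint;

    struct RegionNode { RegionNode* next; bool young; };

    struct YoungListSketch {
      RegionNode* _head;
      uint        _length;                 // cached count, now uint

      bool check_well_formed() const {
        uint length = 0;                   // walk counter matches _length
        for (RegionNode* c = _head; c != NULL; c = c->next) {
          if (!c->young) return false;     // every entry must be young
          ++length;
        }
        return length == _length;          // unsigned == unsigned, no cast
      }
    };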
335 // the incremental collection set for the next evacuation 335 // the incremental collection set for the next evacuation
336 // pause. 336 // pause.
337 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); 337 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
338 young_index_in_cset += 1; 338 young_index_in_cset += 1;
339 } 339 }
340 assert((size_t) young_index_in_cset == _survivor_length, 340 assert((uint) young_index_in_cset == _survivor_length, "post-condition");
341 "post-condition");
342 _g1h->g1_policy()->note_stop_adding_survivor_regions(); 341 _g1h->g1_policy()->note_stop_adding_survivor_regions();
343 342
344 _head = _survivor_head; 343 _head = _survivor_head;
345 _length = _survivor_length; 344 _length = _survivor_length;
346 if (_survivor_head != NULL) { 345 if (_survivor_head != NULL) {
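The assert at line 340 casts young_index_in_cset to uint: cset indexes stay int because -1 marks a region that is not in the young cset (see the "+1 to make the -1 indexes valid" comment further down), while list lengths are now uint. A sketch of that comparison boundary, with invented function names:

    #include <cassert>

    typedef unsigned int uint;

    void check_survivor_post(int young_index_in_cset, uint survivor_length) {
      // Cast the signed index exactly once, at the unsigned comparison.
      assert((uint) young_index_in_cset == survivor_length
             && "post-condition");
    }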
531 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 530 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
532 while (!_secondary_free_list.is_empty() || free_regions_coming()) { 531 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
533 if (!_secondary_free_list.is_empty()) { 532 if (!_secondary_free_list.is_empty()) {
534 if (G1ConcRegionFreeingVerbose) { 533 if (G1ConcRegionFreeingVerbose) {
535 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 534 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
536 "secondary_free_list has "SIZE_FORMAT" entries", 535 "secondary_free_list has %u entries",
537 _secondary_free_list.length()); 536 _secondary_free_list.length());
538 } 537 }
539 // It looks as if there are free regions available on the 538 // It looks as if there are free regions available on the
540 // secondary_free_list. Let's move them to the free_list and try 539 // secondary_free_list. Let's move them to the free_list and try
541 // again to allocate from it. 540 // again to allocate from it.
617 } 616 }
618 } 617 }
619 return res; 618 return res;
620 } 619 }
621 620
622 size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, 621 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
623 size_t word_size) { 622 size_t word_size) {
624 assert(isHumongous(word_size), "word_size should be humongous"); 623 assert(isHumongous(word_size), "word_size should be humongous");
625 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); 624 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
626 625
627 size_t first = G1_NULL_HRS_INDEX; 626 uint first = G1_NULL_HRS_INDEX;
628 if (num_regions == 1) { 627 if (num_regions == 1) {
629 // Only one region to allocate, no need to go through the slower 628 // Only one region to allocate, no need to go through the slower
630 // path. The caller will attempt the expansion if this fails, so 629 // path. The caller will attempt the expansion if this fails, so
631 // let's not try to expand here too. 630 // let's not try to expand here too.
632 HeapRegion* hr = new_region(word_size, false /* do_expand */); 631 HeapRegion* hr = new_region(word_size, false /* do_expand */);
648 append_secondary_free_list_if_not_empty_with_lock(); 647 append_secondary_free_list_if_not_empty_with_lock();
649 648
650 if (free_regions() >= num_regions) { 649 if (free_regions() >= num_regions) {
651 first = _hrs.find_contiguous(num_regions); 650 first = _hrs.find_contiguous(num_regions);
652 if (first != G1_NULL_HRS_INDEX) { 651 if (first != G1_NULL_HRS_INDEX) {
653 for (size_t i = first; i < first + num_regions; ++i) { 652 for (uint i = first; i < first + num_regions; ++i) {
654 HeapRegion* hr = region_at(i); 653 HeapRegion* hr = region_at(i);
655 assert(hr->is_empty(), "sanity"); 654 assert(hr->is_empty(), "sanity");
656 assert(is_on_master_free_list(hr), "sanity"); 655 assert(is_on_master_free_list(hr), "sanity");
657 hr->set_pending_removal(true); 656 hr->set_pending_removal(true);
658 } 657 }
662 } 661 }
663 return first; 662 return first;
664 } 663 }
665 664
666 HeapWord* 665 HeapWord*
667 G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first, 666 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
668 size_t num_regions, 667 uint num_regions,
669 size_t word_size) { 668 size_t word_size) {
670 assert(first != G1_NULL_HRS_INDEX, "pre-condition"); 669 assert(first != G1_NULL_HRS_INDEX, "pre-condition");
671 assert(isHumongous(word_size), "word_size should be humongous"); 670 assert(isHumongous(word_size), "word_size should be humongous");
672 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); 671 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
673 672
674 // Index of last region in the series + 1. 673 // Index of last region in the series + 1.
675 size_t last = first + num_regions; 674 uint last = first + num_regions;
676 675
677 // We need to initialize the region(s) we just discovered. This is 676 // We need to initialize the region(s) we just discovered. This is
678 // a bit tricky given that it can happen concurrently with 677 // a bit tricky given that it can happen concurrently with
679 // refinement threads refining cards on these regions and 678 // refinement threads refining cards on these regions and
680 // potentially wanting to refine the BOT as they are scanning 679 // potentially wanting to refine the BOT as they are scanning
681 // those cards (this can happen shortly after a cleanup; see CR 680 // those cards (this can happen shortly after a cleanup; see CR
682 // 6991377). So we have to set up the region(s) carefully and in 681 // 6991377). So we have to set up the region(s) carefully and in
683 // a specific order. 682 // a specific order.
684 683
685 // The word size sum of all the regions we will allocate. 684 // The word size sum of all the regions we will allocate.
686 size_t word_size_sum = num_regions * HeapRegion::GrainWords; 685 size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
687 assert(word_size <= word_size_sum, "sanity"); 686 assert(word_size <= word_size_sum, "sanity");
688 687
689 // This will be the "starts humongous" region. 688 // This will be the "starts humongous" region.
690 HeapRegion* first_hr = region_at(first); 689 HeapRegion* first_hr = region_at(first);
691 // The header of the new object will be placed at the bottom of 690 // The header of the new object will be placed at the bottom of
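With num_regions now a uint, the word-size sum above becomes (size_t) num_regions * HeapRegion::GrainWords. A small sketch of why the widening cast is there (kGrainWords is an illustrative stand-in for HeapRegion::GrainWords):

    #include <cstddef>

    typedef unsigned int uint;

    static const size_t kGrainWords = (size_t) 1 << 20;  // illustrative

    size_t word_size_sum(uint num_regions) {
      // Force the multiply into size_t; if both operands were 32 bits,
      // a large region count could wrap the product.
      return (size_t) num_regions * kGrainWords;
    }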
720 first_hr->set_startsHumongous(new_top, new_end); 719 first_hr->set_startsHumongous(new_top, new_end);
721 720
722 // Then, if there are any, we will set up the "continues 721 // Then, if there are any, we will set up the "continues
723 // humongous" regions. 722 // humongous" regions.
724 HeapRegion* hr = NULL; 723 HeapRegion* hr = NULL;
725 for (size_t i = first + 1; i < last; ++i) { 724 for (uint i = first + 1; i < last; ++i) {
726 hr = region_at(i); 725 hr = region_at(i);
727 hr->set_continuesHumongous(first_hr); 726 hr->set_continuesHumongous(first_hr);
728 } 727 }
729 // If we have "continues humongous" regions (hr != NULL), then the 728 // If we have "continues humongous" regions (hr != NULL), then the
730 // end of the last one should match new_end. 729 // end of the last one should match new_end.
766 // fields here. The way we set top for all regions (i.e., top == 765 // fields here. The way we set top for all regions (i.e., top ==
767 // end for all regions but the last one, top == new_top for the 766 // end for all regions but the last one, top == new_top for the
768 // last one) is actually used when we will free up the humongous 767 // last one) is actually used when we will free up the humongous
769 // region in free_humongous_region(). 768 // region in free_humongous_region().
770 hr = NULL; 769 hr = NULL;
771 for (size_t i = first + 1; i < last; ++i) { 770 for (uint i = first + 1; i < last; ++i) {
772 hr = region_at(i); 771 hr = region_at(i);
773 if ((i + 1) == last) { 772 if ((i + 1) == last) {
774 // last continues humongous region 773 // last continues humongous region
775 assert(hr->bottom() < new_top && new_top <= hr->end(), 774 assert(hr->bottom() < new_top && new_top <= hr->end(),
776 "new_top should fall on this region"); 775 "new_top should fall on this region");
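The two loops above first mark every tail region "continues humongous" and then fix up top: top == end for every region in the series except the last, which stops at new_top. A sketch of that fill rule over a plain array (types and names invented):

    typedef unsigned int uint;
    typedef char* HeapWordPtr;               // stand-in for HeapWord*

    struct HumongousRegionSketch { HeapWordPtr end_, top_; };

    void set_series_tops(HumongousRegionSketch* r, uint first, uint last,
                         HeapWordPtr new_top) {
      // [first + 1, last) are the "continues humongous" tail regions.
      for (uint i = first + 1; i < last; ++i) {
        r[i].top_ = ((i + 1) == last) ? new_top : r[i].end_;
      }
    }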
802 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { 801 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
803 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 802 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
804 803
805 verify_region_sets_optional(); 804 verify_region_sets_optional();
806 805
807 size_t num_regions = 806 size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
808 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; 807 uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
809 size_t x_size = expansion_regions(); 808 uint x_num = expansion_regions();
810 size_t fs = _hrs.free_suffix(); 809 uint fs = _hrs.free_suffix();
811 size_t first = humongous_obj_allocate_find_first(num_regions, word_size); 810 uint first = humongous_obj_allocate_find_first(num_regions, word_size);
812 if (first == G1_NULL_HRS_INDEX) { 811 if (first == G1_NULL_HRS_INDEX) {
813 // The only thing we can do now is attempt expansion. 812 // The only thing we can do now is attempt expansion.
814 if (fs + x_size >= num_regions) { 813 if (fs + x_num >= num_regions) {
815 // If the number of regions we're trying to allocate for this 814 // If the number of regions we're trying to allocate for this
816 // object is at most the number of regions in the free suffix, 815 // object is at most the number of regions in the free suffix,
817 // then the call to humongous_obj_allocate_find_first() above 816 // then the call to humongous_obj_allocate_find_first() above
818 // should have succeeded and we wouldn't be here. 817 // should have succeeded and we wouldn't be here.
819 // 818 //
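humongous_obj_allocate() now splits the old one-liner: it rounds word_size up to the region grain, then narrows the quotient to a uint region count. A sketch, with this round_to() standing in for HotSpot's utility of the same name:

    #include <cstddef>

    typedef unsigned int uint;

    static size_t round_to(size_t x, size_t unit) {
      return ((x + unit - 1) / unit) * unit;   // round up to a multiple
    }

    uint regions_for(size_t word_size, size_t grain_words) {
      size_t rounded = round_to(word_size, grain_words);
      return (uint) (rounded / grain_words);   // quotient is a region count
    }

For example, with a 1M-word grain a 2.5M-word object rounds to 3M words and needs 3 regions.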
1779 size_t old_mem_size = _g1_storage.committed_size(); 1778 size_t old_mem_size = _g1_storage.committed_size();
1780 size_t aligned_shrink_bytes = 1779 size_t aligned_shrink_bytes =
1781 ReservedSpace::page_align_size_down(shrink_bytes); 1780 ReservedSpace::page_align_size_down(shrink_bytes);
1782 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, 1781 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1783 HeapRegion::GrainBytes); 1782 HeapRegion::GrainBytes);
1784 size_t num_regions_deleted = 0; 1783 uint num_regions_deleted = 0;
1785 MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted); 1784 MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
1786 HeapWord* old_end = (HeapWord*) _g1_storage.high(); 1785 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1787 assert(mr.end() == old_end, "post-condition"); 1786 assert(mr.end() == old_end, "post-condition");
1788 1787
1789 ergo_verbose3(ErgoHeapSizing, 1788 ergo_verbose3(ErgoHeapSizing,
2002 // happen in asserts: DLD.) 2001 // happen in asserts: DLD.)
2003 _reserved.set_word_size(0); 2002 _reserved.set_word_size(0);
2004 _reserved.set_start((HeapWord*)heap_rs.base()); 2003 _reserved.set_start((HeapWord*)heap_rs.base());
2005 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); 2004 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
2006 2005
2007 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; 2006 _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
2008 2007
2009 // Create the gen rem set (and barrier set) for the entire reserved region. 2008 // Create the gen rem set (and barrier set) for the entire reserved region.
2010 _rem_set = collector_policy()->create_rem_set(_reserved, 2); 2009 _rem_set = collector_policy()->create_rem_set(_reserved, 2);
2011 set_barrier_set(rem_set()->bs()); 2010 set_barrier_set(rem_set()->bs());
2012 if (barrier_set()->is_a(BarrierSet::ModRef)) { 2011 if (barrier_set()->is_a(BarrierSet::ModRef)) {
2039 (HeapWord*) _g1_reserved.end(), 2038 (HeapWord*) _g1_reserved.end(),
2040 _expansion_regions); 2039 _expansion_regions);
2041 2040
2042 // 6843694 - ensure that the maximum region index can fit 2041 // 6843694 - ensure that the maximum region index can fit
2043 // in the remembered set structures. 2042 // in the remembered set structures.
2044 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; 2043 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2045 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); 2044 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2046 2045
2047 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; 2046 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2048 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); 2047 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2049 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region, 2048 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
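The 6843694 guard above computes the largest region index that still fits in a signed RegionIdx_t, now in plain uint arithmetic. A worked sketch, assuming RegionIdx_t is a 16-bit short (the actual typedef lives in the remembered-set code):

    typedef unsigned int uint;
    typedef short RegionIdx_t;               // assumed width for this sketch
    static const int BitsPerByte = 8;

    // For a 16-bit signed type: (1U << 15) - 1 == 32767, so region
    // indexes 0..32767 are representable.
    static const uint max_region_idx =
        (1U << (sizeof(RegionIdx_t) * BitsPerByte - 1)) - 1;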
2055 heap_word_size(init_byte_size)); 2054 heap_word_size(init_byte_size));
2056 2055
2057 _g1h = this; 2056 _g1h = this;
2058 2057
2059 _in_cset_fast_test_length = max_regions(); 2058 _in_cset_fast_test_length = max_regions();
2060 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); 2059 _in_cset_fast_test_base =
2060 NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length);
2061 2061
2062 // We're biasing _in_cset_fast_test to avoid subtracting the 2062 // We're biasing _in_cset_fast_test to avoid subtracting the
2063 // beginning of the heap every time we want to index; basically 2063 // beginning of the heap every time we want to index; basically
2064 // it's the same with what we do with the card table. 2064 // it's the same with what we do with the card table.
2065 _in_cset_fast_test = _in_cset_fast_test_base - 2065 _in_cset_fast_test = _in_cset_fast_test_base -
2066 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); 2066 ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
2067 2067
2068 // Clear the _cset_fast_test bitmap in anticipation of adding 2068 // Clear the _cset_fast_test bitmap in anticipation of adding
2069 // regions to the incremental collection set for the first 2069 // regions to the incremental collection set for the first
2070 // evacuation pause. 2070 // evacuation pause.
2071 clear_cset_fast_test(); 2071 clear_cset_fast_test();
2072 2072
2073 // Create the ConcurrentMark data structure and thread. 2073 // Create the ConcurrentMark data structure and thread.
2074 // (Must do this late, so that "max_regions" is defined.) 2074 // (Must do this late, so that "max_regions" is defined.)
2075 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); 2075 _cm = new ConcurrentMark(heap_rs, max_regions());
2076 _cmThread = _cm->cmThread(); 2076 _cmThread = _cm->cmThread();
2077 2077
2078 // Initialize the from_card cache structure of HeapRegionRemSet. 2078 // Initialize the from_card cache structure of HeapRegionRemSet.
2079 HeapRegionRemSet::init_heap(max_regions()); 2079 HeapRegionRemSet::init_heap(max_regions());
2080 2080
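The biasing of _in_cset_fast_test above (now with a uintx cast) is the same trick the card table uses: shift the base pointer once by the heap's start so lookups can index with (addr >> LogOfHRGrainBytes) directly. A sketch with invented names, using uintptr_t as a stand-in for HotSpot's uintx:

    #include <cstdint>

    struct CSetFastTestSketch {
      bool* _biased;                         // base - (heap_start >> shift)
      int   _shift;                          // LogOfHRGrainBytes stand-in

      void init(bool* base, char* reserved_start, int shift) {
        _shift  = shift;
        _biased = base - ((uintptr_t) reserved_start >> shift);
      }
      bool in_cset(const char* addr) const {
        // No subtraction of the heap start on the hot path.
        return _biased[(uintptr_t) addr >> _shift];
      }
    };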
2579 void 2579 void
2580 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, 2580 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2581 uint worker, 2581 uint worker,
2582 uint no_of_par_workers, 2582 uint no_of_par_workers,
2583 jint claim_value) { 2583 jint claim_value) {
2584 const size_t regions = n_regions(); 2584 const uint regions = n_regions();
2585 const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ? 2585 const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
2586 no_of_par_workers : 2586 no_of_par_workers :
2587 1); 2587 1);
2588 assert(UseDynamicNumberOfGCThreads || 2588 assert(UseDynamicNumberOfGCThreads ||
2589 no_of_par_workers == workers()->total_workers(), 2589 no_of_par_workers == workers()->total_workers(),
2590 "Non dynamic should use fixed number of workers"); 2590 "Non dynamic should use fixed number of workers");
2591 // try to spread out the starting points of the workers 2591 // try to spread out the starting points of the workers
2592 const size_t start_index = regions / max_workers * (size_t) worker; 2592 const uint start_index = regions / max_workers * worker;
2593 2593
2594 // each worker will actually look at all regions 2594 // each worker will actually look at all regions
2595 for (size_t count = 0; count < regions; ++count) { 2595 for (uint count = 0; count < regions; ++count) {
2596 const size_t index = (start_index + count) % regions; 2596 const uint index = (start_index + count) % regions;
2597 assert(0 <= index && index < regions, "sanity"); 2597 assert(0 <= index && index < regions, "sanity");
2598 HeapRegion* r = region_at(index); 2598 HeapRegion* r = region_at(index);
2599 // we'll ignore "continues humongous" regions (we'll process them 2599 // we'll ignore "continues humongous" regions (we'll process them
2600 // when we come across their corresponding "start humongous" 2600 // when we come across their corresponding "start humongous"
2601 // region) and regions already claimed 2601 // region) and regions already claimed
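heap_region_par_iterate_chunked() spreads the workers' start points and then walks all regions modulo the region count, now entirely in uint arithmetic. A minimal sketch of the scheduling (claiming and processing elided; assumes regions > 0 and max_workers > 0):

    typedef unsigned int uint;

    void iterate_chunked(uint regions, uint worker, uint max_workers) {
      // Offset each worker so they rarely race on the same claim word.
      const uint start_index = regions / max_workers * worker;
      for (uint count = 0; count < regions; ++count) {
        const uint index = (start_index + count) % regions;
        (void) index;  // ... try to claim region_at(index), process it ...
      }
    }

With 100 regions and 4 workers the start points are 0, 25, 50 and 75.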
2613 // closure on the "starts humongous" region might de-allocate 2613 // closure on the "starts humongous" region might de-allocate
2614 // and clear all its "continues humongous" regions and, as a 2614 // and clear all its "continues humongous" regions and, as a
2615 // result, we might end up processing them twice. So, we'll do 2615 // result, we might end up processing them twice. So, we'll do
2616 // them first (notice: most closures will ignore them anyway) and 2616 // them first (notice: most closures will ignore them anyway) and
2617 // then we'll do the "starts humongous" region. 2617 // then we'll do the "starts humongous" region.
2618 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { 2618 for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
2619 HeapRegion* chr = region_at(ch_index); 2619 HeapRegion* chr = region_at(ch_index);
2620 2620
2621 // if the region has already been claimed or it's not 2621 // if the region has already been claimed or it's not
2622 // "continues humongous" we're done 2622 // "continues humongous" we're done
2623 if (chr->claim_value() == claim_value || 2623 if (chr->claim_value() == claim_value ||
2681 // regions is correct. 2681 // regions is correct.
2682 2682
2683 class CheckClaimValuesClosure : public HeapRegionClosure { 2683 class CheckClaimValuesClosure : public HeapRegionClosure {
2684 private: 2684 private:
2685 jint _claim_value; 2685 jint _claim_value;
2686 size_t _failures; 2686 uint _failures;
2687 HeapRegion* _sh_region; 2687 HeapRegion* _sh_region;
2688
2688 public: 2689 public:
2689 CheckClaimValuesClosure(jint claim_value) : 2690 CheckClaimValuesClosure(jint claim_value) :
2690 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } 2691 _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
2691 bool doHeapRegion(HeapRegion* r) { 2692 bool doHeapRegion(HeapRegion* r) {
2692 if (r->claim_value() != _claim_value) { 2693 if (r->claim_value() != _claim_value) {
2710 ++_failures; 2711 ++_failures;
2711 } 2712 }
2712 } 2713 }
2713 return false; 2714 return false;
2714 } 2715 }
2715 size_t failures() { 2716 uint failures() { return _failures; }
2716 return _failures;
2717 }
2718 }; 2717 };
2719 2718
2720 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { 2719 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
2721 CheckClaimValuesClosure cl(claim_value); 2720 CheckClaimValuesClosure cl(claim_value);
2722 heap_region_iterate(&cl); 2721 heap_region_iterate(&cl);
2723 return cl.failures() == 0; 2722 return cl.failures() == 0;
2724 } 2723 }
2725 2724
2726 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure { 2725 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
2727 jint _claim_value; 2726 private:
2728 size_t _failures; 2727 jint _claim_value;
2728 uint _failures;
2729 2729
2730 public: 2730 public:
2731 CheckClaimValuesInCSetHRClosure(jint claim_value) : 2731 CheckClaimValuesInCSetHRClosure(jint claim_value) :
2732 _claim_value(claim_value), 2732 _claim_value(claim_value), _failures(0) { }
2733 _failures(0) { } 2733
2734 2734 uint failures() { return _failures; }
2735 size_t failures() {
2736 return _failures;
2737 }
2738 2735
2739 bool doHeapRegion(HeapRegion* hr) { 2736 bool doHeapRegion(HeapRegion* hr) {
2740 assert(hr->in_collection_set(), "how?"); 2737 assert(hr->in_collection_set(), "how?");
2741 assert(!hr->isHumongous(), "H-region in CSet"); 2738 assert(!hr->isHumongous(), "H-region in CSet");
2742 if (hr->claim_value() != _claim_value) { 2739 if (hr->claim_value() != _claim_value) {
2799 // p threads 2796 // p threads
2800 // Then thread t will start at region floor ((t * n) / p) 2797 // Then thread t will start at region floor ((t * n) / p)
2801 2798
2802 result = g1_policy()->collection_set(); 2799 result = g1_policy()->collection_set();
2803 if (G1CollectedHeap::use_parallel_gc_threads()) { 2800 if (G1CollectedHeap::use_parallel_gc_threads()) {
2804 size_t cs_size = g1_policy()->cset_region_length(); 2801 uint cs_size = g1_policy()->cset_region_length();
2805 uint active_workers = workers()->active_workers(); 2802 uint active_workers = workers()->active_workers();
2806 assert(UseDynamicNumberOfGCThreads || 2803 assert(UseDynamicNumberOfGCThreads ||
2807 active_workers == workers()->total_workers(), 2804 active_workers == workers()->total_workers(),
2808 "Unless dynamic should use total workers"); 2805 "Unless dynamic should use total workers");
2809 2806
2810 size_t end_ind = (cs_size * worker_i) / active_workers; 2807 uint end_ind = (cs_size * worker_i) / active_workers;
2811 size_t start_ind = 0; 2808 uint start_ind = 0;
2812 2809
2813 if (worker_i > 0 && 2810 if (worker_i > 0 &&
2814 _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) { 2811 _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2815 // Previous worker's starting region is valid 2812 // Previous worker's starting region is valid
2816 // so let's iterate from there 2813 // so let's iterate from there
2817 start_ind = (cs_size * (worker_i - 1)) / active_workers; 2814 start_ind = (cs_size * (worker_i - 1)) / active_workers;
2818 result = _worker_cset_start_region[worker_i - 1]; 2815 result = _worker_cset_start_region[worker_i - 1];
2819 } 2816 }
2820 2817
2821 for (size_t i = start_ind; i < end_ind; i++) { 2818 for (uint i = start_ind; i < end_ind; i++) {
2822 result = result->next_in_collection_set(); 2819 result = result->next_in_collection_set();
2823 } 2820 }
2824 } 2821 }
2825 2822
2826 // Note: the calculated starting heap region may be NULL 2823 // Note: the calculated starting heap region may be NULL
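The hunk above splits the collection set so that worker t of p starts at region floor(t * n / p), reusing the previous worker's cached start region to shorten the list walk. The index arithmetic, now in uint:

    typedef unsigned int uint;

    // Worker t of p walks cset regions [floor(t*n/p), floor((t+1)*n/p)).
    uint cset_start_index(uint cs_size, uint worker_i, uint active_workers) {
      return (cs_size * worker_i) / active_workers;
    }

With n = 10 regions and p = 3 workers the starting indexes are 0, 3 and 6, so the ranges partition the cset without overlap.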
3278 _g1_storage.low_boundary(), 3275 _g1_storage.low_boundary(),
3279 _g1_storage.high(), 3276 _g1_storage.high(),
3280 _g1_storage.high_boundary()); 3277 _g1_storage.high_boundary());
3281 st->cr(); 3278 st->cr();
3282 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K); 3279 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3283 size_t young_regions = _young_list->length(); 3280 uint young_regions = _young_list->length();
3284 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", 3281 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3285 young_regions, young_regions * HeapRegion::GrainBytes / K); 3282 (size_t) young_regions * HeapRegion::GrainBytes / K);
3286 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); 3283 uint survivor_regions = g1_policy()->recorded_survivor_regions();
3287 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", 3284 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3288 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); 3285 (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3289 st->cr(); 3286 st->cr();
3290 perm()->as_gen()->print_on(st); 3287 perm()->as_gen()->print_on(st);
3291 } 3288 }
3292 3289
3293 void G1CollectedHeap::print_extended_on(outputStream* st) const { 3290 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3294 print_on(st); 3291 print_on(st);
3295 3292
3296 // Print the per-region information. 3293 // Print the per-region information.
3297 st->cr(); 3294 st->cr();
3298 st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), HS=humongous(starts), HC=humongous(continues), CS=collection set, F=free, TS=gc time stamp, PTAMS=previous top-at-mark-start, NTAMS=next top-at-mark-start)"); 3295 st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3296 "HS=humongous(starts), HC=humongous(continues), "
3297 "CS=collection set, F=free, TS=gc time stamp, "
3298 "PTAMS=previous top-at-mark-start, "
3299 "NTAMS=next top-at-mark-start)");
3299 PrintRegionClosure blk(st); 3300 PrintRegionClosure blk(st);
3300 heap_region_iterate(&blk); 3301 heap_region_iterate(&blk);
3301 } 3302 }
3302 3303
3303 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { 3304 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3471 return g1_rem_set()->cardsScanned(); 3472 return g1_rem_set()->cardsScanned();
3472 } 3473 }
3473 3474
3474 void 3475 void
3475 G1CollectedHeap::setup_surviving_young_words() { 3476 G1CollectedHeap::setup_surviving_young_words() {
3476 guarantee( _surviving_young_words == NULL, "pre-condition" ); 3477 assert(_surviving_young_words == NULL, "pre-condition");
3477 size_t array_length = g1_policy()->young_cset_region_length(); 3478 uint array_length = g1_policy()->young_cset_region_length();
3478 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); 3479 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length);
3479 if (_surviving_young_words == NULL) { 3480 if (_surviving_young_words == NULL) {
3480 vm_exit_out_of_memory(sizeof(size_t) * array_length, 3481 vm_exit_out_of_memory(sizeof(size_t) * array_length,
3481 "Not enough space for young surv words summary."); 3482 "Not enough space for young surv words summary.");
3482 } 3483 }
3483 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); 3484 memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3484 #ifdef ASSERT 3485 #ifdef ASSERT
3485 for (size_t i = 0; i < array_length; ++i) { 3486 for (uint i = 0; i < array_length; ++i) {
3486 assert( _surviving_young_words[i] == 0, "memset above" ); 3487 assert( _surviving_young_words[i] == 0, "memset above" );
3487 } 3488 }
3488 #endif // !ASSERT 3489 #endif // !ASSERT
3489 } 3490 }
3490 3491
3491 void 3492 void
3492 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { 3493 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3493 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 3494 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3494 size_t array_length = g1_policy()->young_cset_region_length(); 3495 uint array_length = g1_policy()->young_cset_region_length();
3495 for (size_t i = 0; i < array_length; ++i) 3496 for (uint i = 0; i < array_length; ++i) {
3496 _surviving_young_words[i] += surv_young_words[i]; 3497 _surviving_young_words[i] += surv_young_words[i];
3498 }
3497 } 3499 }
3498 3500
3499 void 3501 void
3500 G1CollectedHeap::cleanup_surviving_young_words() { 3502 G1CollectedHeap::cleanup_surviving_young_words() {
3501 guarantee( _surviving_young_words != NULL, "pre-condition" ); 3503 guarantee( _surviving_young_words != NULL, "pre-condition" );
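setup_surviving_young_words() sizes its counters by the (now uint) young cset length, widening back to size_t for the allocation and memset; update_surviving_young_words() merges per-worker results under ParGCRareEvent_lock. A sketch with new[] standing in for NEW_C_HEAP_ARRAY:

    #include <cstring>
    #include <cstddef>

    typedef unsigned int uint;

    size_t* setup_counters(uint array_length) {
      size_t* words = new size_t[array_length];
      // Widen the uint length for the byte-size computation.
      std::memset(words, 0, (size_t) array_length * sizeof(size_t));
      return words;
    }

    void merge_counters(size_t* totals, const size_t* per_worker,
                        uint array_length) {
      // The real code holds a lock around this merge.
      for (uint i = 0; i < array_length; ++i) {
        totals[i] += per_worker[i];
      }
    }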
4240 // we allocate G1YoungSurvRateNumRegions plus one entries, since 4242 // we allocate G1YoungSurvRateNumRegions plus one entries, since
4241 // we "sacrifice" entry 0 to keep track of surviving bytes for 4243 // we "sacrifice" entry 0 to keep track of surviving bytes for
4242 // non-young regions (where the age is -1) 4244 // non-young regions (where the age is -1)
4243 // We also add a few elements at the beginning and at the end in 4245 // We also add a few elements at the beginning and at the end in
4244 // an attempt to eliminate cache contention 4246 // an attempt to eliminate cache contention
4245 size_t real_length = 1 + _g1h->g1_policy()->young_cset_region_length(); 4247 uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4246 size_t array_length = PADDING_ELEM_NUM + 4248 uint array_length = PADDING_ELEM_NUM +
4247 real_length + 4249 real_length +
4248 PADDING_ELEM_NUM; 4250 PADDING_ELEM_NUM;
4249 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); 4251 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
4250 if (_surviving_young_words_base == NULL) 4252 if (_surviving_young_words_base == NULL)
4251 vm_exit_out_of_memory(array_length * sizeof(size_t), 4253 vm_exit_out_of_memory(array_length * sizeof(size_t),
4252 "Not enough space for young surv histo."); 4254 "Not enough space for young surv histo.");
4253 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; 4255 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4254 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); 4256 memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4255 4257
4256 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; 4258 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4257 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; 4259 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
4258 4260
4259 _start = os::elapsedTime(); 4261 _start = os::elapsedTime();
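The per-worker setup above pads the survivor-word array with PADDING_ELEM_NUM slots on each side to keep other threads' writes off its cache lines. A sketch assuming a 64-byte line (the real constant is defined elsewhere in HotSpot):

    #include <cstddef>

    typedef unsigned int uint;

    static const uint PADDING_ELEM_NUM = 64 / sizeof(size_t);  // assumed

    size_t* alloc_padded(uint real_length) {
      uint array_length = PADDING_ELEM_NUM + real_length + PADDING_ELEM_NUM;
      size_t* base = new size_t[array_length];
      // Hand out the middle; the pads absorb false sharing at both ends.
      return base + PADDING_ELEM_NUM;
    }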
4386 } 4388 }
4387 4389
4388 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> 4390 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4389 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> 4391 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4390 ::copy_to_survivor_space(oop old) { 4392 ::copy_to_survivor_space(oop old) {
4391 size_t word_sz = old->size(); 4393 size_t word_sz = old->size();
4392 HeapRegion* from_region = _g1->heap_region_containing_raw(old); 4394 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4393 // +1 to make the -1 indexes valid... 4395 // +1 to make the -1 indexes valid...
4394 int young_index = from_region->young_index_in_cset()+1; 4396 int young_index = from_region->young_index_in_cset()+1;
4395 assert( (from_region->is_young() && young_index > 0) || 4397 assert( (from_region->is_young() && young_index > 0) ||
4396 (!from_region->is_young() && young_index == 0), "invariant" ); 4398 (!from_region->is_young() && young_index == 0), "invariant" );
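The "+1 to make the -1 indexes valid" comment above is the counterpart of the sacrificed entry 0 in the survivor histogram: non-young regions report age -1 and land in slot 0. As a one-line sketch:

    typedef unsigned int uint;

    uint counter_slot(int young_index_in_cset) {
      // -1 (not a young cset region) maps to slot 0; real indexes start at 1.
      return (uint) (young_index_in_cset + 1);
    }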
5583 size_t hr_pre_used = 0; 5585 size_t hr_pre_used = 0;
5584 _humongous_set.remove_with_proxy(hr, humongous_proxy_set); 5586 _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5585 hr->set_notHumongous(); 5587 hr->set_notHumongous();
5586 free_region(hr, &hr_pre_used, free_list, par); 5588 free_region(hr, &hr_pre_used, free_list, par);
5587 5589
5588 size_t i = hr->hrs_index() + 1; 5590 uint i = hr->hrs_index() + 1;
5589 size_t num = 1; 5591 uint num = 1;
5590 while (i < n_regions()) { 5592 while (i < n_regions()) {
5591 HeapRegion* curr_hr = region_at(i); 5593 HeapRegion* curr_hr = region_at(i);
5592 if (!curr_hr->continuesHumongous()) { 5594 if (!curr_hr->continuesHumongous()) {
5593 break; 5595 break;
5594 } 5596 }
5793 cur->set_in_collection_set(false); 5795 cur->set_in_collection_set(false);
5794 5796
5795 if (cur->is_young()) { 5797 if (cur->is_young()) {
5796 int index = cur->young_index_in_cset(); 5798 int index = cur->young_index_in_cset();
5797 assert(index != -1, "invariant"); 5799 assert(index != -1, "invariant");
5798 assert((size_t) index < policy->young_cset_region_length(), "invariant"); 5800 assert((uint) index < policy->young_cset_region_length(), "invariant");
5799 size_t words_survived = _surviving_young_words[index]; 5801 size_t words_survived = _surviving_young_words[index];
5800 cur->record_surv_words_in_group(words_survived); 5802 cur->record_surv_words_in_group(words_survived);
5801 5803
5802 // At this point we have 'popped' cur from the collection set 5804 // At this point we have 'popped' cur from the collection set
5803 // (linked via next_in_collection_set()) but it is still in the 5805 // (linked via next_in_collection_set()) but it is still in the
6133 } 6135 }
6134 6136
6135 // Methods for the GC alloc regions 6137 // Methods for the GC alloc regions
6136 6138
6137 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, 6139 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6138 size_t count, 6140 uint count,
6139 GCAllocPurpose ap) { 6141 GCAllocPurpose ap) {
6140 assert(FreeList_lock->owned_by_self(), "pre-condition"); 6142 assert(FreeList_lock->owned_by_self(), "pre-condition");
6141 6143
6142 if (count < g1_policy()->max_regions(ap)) { 6144 if (count < g1_policy()->max_regions(ap)) {
6143 HeapRegion* new_alloc_region = new_region(word_size, 6145 HeapRegion* new_alloc_region = new_region(word_size,
6205 class VerifyRegionListsClosure : public HeapRegionClosure { 6207 class VerifyRegionListsClosure : public HeapRegionClosure {
6206 private: 6208 private:
6207 FreeRegionList* _free_list; 6209 FreeRegionList* _free_list;
6208 OldRegionSet* _old_set; 6210 OldRegionSet* _old_set;
6209 HumongousRegionSet* _humongous_set; 6211 HumongousRegionSet* _humongous_set;
6210 size_t _region_count; 6212 uint _region_count;
6211 6213
6212 public: 6214 public:
6213 VerifyRegionListsClosure(OldRegionSet* old_set, 6215 VerifyRegionListsClosure(OldRegionSet* old_set,
6214 HumongousRegionSet* humongous_set, 6216 HumongousRegionSet* humongous_set,
6215 FreeRegionList* free_list) : 6217 FreeRegionList* free_list) :
6216 _old_set(old_set), _humongous_set(humongous_set), 6218 _old_set(old_set), _humongous_set(humongous_set),
6217 _free_list(free_list), _region_count(0) { } 6219 _free_list(free_list), _region_count(0) { }
6218 6220
6219 size_t region_count() { return _region_count; } 6221 uint region_count() { return _region_count; }
6220 6222
6221 bool doHeapRegion(HeapRegion* hr) { 6223 bool doHeapRegion(HeapRegion* hr) {
6222 _region_count += 1; 6224 _region_count += 1;
6223 6225
6224 if (hr->continuesHumongous()) { 6226 if (hr->continuesHumongous()) {
6236 } 6238 }
6237 return false; 6239 return false;
6238 } 6240 }
6239 }; 6241 };
6240 6242
6241 HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index, 6243 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6242 HeapWord* bottom) { 6244 HeapWord* bottom) {
6243 HeapWord* end = bottom + HeapRegion::GrainWords; 6245 HeapWord* end = bottom + HeapRegion::GrainWords;
6244 MemRegion mr(bottom, end); 6246 MemRegion mr(bottom, end);
6245 assert(_g1_reserved.contains(mr), "invariant"); 6247 assert(_g1_reserved.contains(mr), "invariant");
6246 // This might return NULL if the allocation fails 6248 // This might return NULL if the allocation fails