comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 2361:1216415d8e35

7014923: G1: code cleanup
Summary: Some G1 code cleanup.
Reviewed-by: johnc, jcoomes, jwilhelm
author tonyp
date Fri, 04 Mar 2011 17:13:19 -0500
parents 4e0069ff33df
children 92da084fefc9
comparing 2312:11303bede852 with 2361:1216415d8e35
@@ -477,11 +477,11 @@
 G1CollectedHeap* G1CollectedHeap::_g1h;

 // Private methods.

 HeapRegion*
-G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) {
+G1CollectedHeap::new_region_try_secondary_free_list() {
   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
     if (!_secondary_free_list.is_empty()) {
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
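The hunk above drops the now-unused word_size parameter: the method hands back whichever region reaches the head of the list, regardless of the requested size, so the parameter carried no information. Since the loop body is truncated in this view, here is a condensed sketch of the consumer pattern it implements, assuming helper names such as append_secondary_free_list() and _free_list.remove_head() that do not appear verbatim in this hunk:

    // Sketch only: drain regions the concurrent cleanup thread has published,
    // blocking while it signals that more are still coming.
    HeapRegion* G1CollectedHeap::new_region_try_secondary_free_list() {
      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
      while (!_secondary_free_list.is_empty() || free_regions_coming()) {
        if (!_secondary_free_list.is_empty()) {
          append_secondary_free_list();               // assumed helper
          HeapRegion* res = _free_list.remove_head(); // assumed accessor
          if (res != NULL) {
            return res;
          }
        } else {
          // Nothing available yet; wait for the cleanup thread's notify.
          SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
        }
      }
      return NULL;
    }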
@@ -529,11 +529,11 @@
     if (!_secondary_free_list.is_empty()) {
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                                "forced to look at the secondary_free_list");
       }
-      res = new_region_try_secondary_free_list(word_size);
+      res = new_region_try_secondary_free_list();
       if (res != NULL) {
         return res;
       }
     }
   }
@@ -541,11 +541,11 @@
   if (res == NULL) {
     if (G1ConcRegionFreeingVerbose) {
       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                              "res == NULL, trying the secondary_free_list");
     }
-    res = new_region_try_secondary_free_list(word_size);
+    res = new_region_try_secondary_free_list();
   }
   if (res == NULL && do_expand) {
     if (expand(word_size * HeapWordSize)) {
       // The expansion succeeded and so we should have at least one
       // region on the free list.
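These two hunks update the call sites to the new zero-argument signature. From the surrounding context (res, do_expand, expand(word_size * HeapWordSize)) they sit in the region allocator's fallback chain, which reads roughly as follows; a paraphrased outline, with remove_head_or_null() as a hypothetical accessor name:

    HeapRegion* res = _free_list.remove_head_or_null(); // 1. master free list
    if (res == NULL) {
      res = new_region_try_secondary_free_list();       // 2. concurrently freed regions
    }
    if (res == NULL && do_expand) {
      // 3. grow the heap; expand() takes bytes, hence the
      //    word_size * HeapWordSize conversion seen above
      if (expand(word_size * HeapWordSize)) {
        res = _free_list.remove_head_or_null();
      }
    }
    return res;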
@@ -577,10 +577,13 @@
   return alloc_region;
 }

 int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
                                                        size_t word_size) {
+  assert(isHumongous(word_size), "word_size should be humongous");
+  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
+
   int first = -1;
   if (num_regions == 1) {
     // Only one region to allocate, no need to go through the slower
     // path. The caller will attempt the expansion if this fails, so
     // let's not try to expand here too.
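The two new asserts pin down the contract between word_size and num_regions. A worked example, assuming an illustrative 1 MB region size on a 64-bit VM (so HeapRegion::GrainWords = 1M / 8 = 131072):

    // A humongous request of word_size = 300000 HeapWords needs
    //   num_regions = ceil(300000 / 131072) = 3
    // regions, and the second precondition holds:
    //   3 * 131072 = 393216 >= 300000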
@@ -598,26 +601,146 @@
   // that we only need to do this if we need to allocate more than
   // one region to satisfy the current humongous allocation
   // request. If we are only allocating one region we use the common
   // region allocation code (see above).
   wait_while_free_regions_coming();
-  append_secondary_free_list_if_not_empty();
+  append_secondary_free_list_if_not_empty_with_lock();

   if (free_regions() >= num_regions) {
     first = _hrs->find_contiguous(num_regions);
     if (first != -1) {
       for (int i = first; i < first + (int) num_regions; ++i) {
         HeapRegion* hr = _hrs->at(i);
         assert(hr->is_empty(), "sanity");
-        assert(is_on_free_list(hr), "sanity");
+        assert(is_on_master_free_list(hr), "sanity");
         hr->set_pending_removal(true);
       }
       _free_list.remove_all_pending(num_regions);
     }
   }
   return first;
+}
+
+HeapWord*
+G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
+                                                           size_t num_regions,
+                                                           size_t word_size) {
+  assert(first != -1, "pre-condition");
+  assert(isHumongous(word_size), "word_size should be humongous");
+  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
+
+  // Index of last region in the series + 1.
+  int last = first + (int) num_regions;
+
+  // We need to initialize the region(s) we just discovered. This is
+  // a bit tricky given that it can happen concurrently with
+  // refinement threads refining cards on these regions and
+  // potentially wanting to refine the BOT as they are scanning
+  // those cards (this can happen shortly after a cleanup; see CR
+  // 6991377). So we have to set up the region(s) carefully and in
+  // a specific order.
+
+  // The word size sum of all the regions we will allocate.
+  size_t word_size_sum = num_regions * HeapRegion::GrainWords;
+  assert(word_size <= word_size_sum, "sanity");
+
+  // This will be the "starts humongous" region.
+  HeapRegion* first_hr = _hrs->at(first);
+  // The header of the new object will be placed at the bottom of
+  // the first region.
+  HeapWord* new_obj = first_hr->bottom();
+  // This will be the new end of the first region in the series that
+  // should also match the end of the last region in the series.
+  HeapWord* new_end = new_obj + word_size_sum;
+  // This will be the new top of the first region that will reflect
+  // this allocation.
+  HeapWord* new_top = new_obj + word_size;
+
+  // First, we need to zero the header of the space that we will be
+  // allocating. When we update top further down, some refinement
+  // threads might try to scan the region. By zeroing the header we
+  // ensure that any thread that will try to scan the region will
+  // come across the zero klass word and bail out.
+  //
+  // NOTE: It would not have been correct to have used
+  // CollectedHeap::fill_with_object() and make the space look like
+  // an int array. The thread that is doing the allocation will
+  // later update the object header to a potentially different array
+  // type and, for a very short period of time, the klass and length
+  // fields will be inconsistent. This could cause a refinement
+  // thread to calculate the object size incorrectly.
+  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+  // We will set up the first region as "starts humongous". This
+  // will also update the BOT covering all the regions to reflect
+  // that there is a single object that starts at the bottom of the
+  // first region.
+  first_hr->set_startsHumongous(new_top, new_end);
+
+  // Then, if there are any, we will set up the "continues
+  // humongous" regions.
+  HeapRegion* hr = NULL;
+  for (int i = first + 1; i < last; ++i) {
+    hr = _hrs->at(i);
+    hr->set_continuesHumongous(first_hr);
+  }
+  // If we have "continues humongous" regions (hr != NULL), then the
+  // end of the last one should match new_end.
+  assert(hr == NULL || hr->end() == new_end, "sanity");
+
+  // Up to this point no concurrent thread would have been able to
+  // do any scanning on any region in this series. All the top
+  // fields still point to bottom, so the intersection between
+  // [bottom,top] and [card_start,card_end] will be empty. Before we
+  // update the top fields, we'll do a storestore to make sure that
+  // no thread sees the update to top before the zeroing of the
+  // object header and the BOT initialization.
+  OrderAccess::storestore();
+
+  // Now that the BOT and the object header have been initialized,
+  // we can update top of the "starts humongous" region.
+  assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
+         "new_top should be in this region");
+  first_hr->set_top(new_top);
+
+  // Now, we will update the top fields of the "continues humongous"
+  // regions. The reason we need to do this is that, otherwise,
+  // these regions would look empty and this will confuse parts of
+  // G1. For example, the code that looks for a consecutive number
+  // of empty regions will consider them empty and try to
+  // re-allocate them. We can extend is_empty() to also include
+  // !continuesHumongous(), but it is easier to just update the top
+  // fields here. The way we set top for all regions (i.e., top ==
+  // end for all regions but the last one, top == new_top for the
+  // last one) is actually used when we will free up the humongous
+  // region in free_humongous_region().
+  hr = NULL;
+  for (int i = first + 1; i < last; ++i) {
+    hr = _hrs->at(i);
+    if ((i + 1) == last) {
+      // last continues humongous region
+      assert(hr->bottom() < new_top && new_top <= hr->end(),
+             "new_top should fall on this region");
+      hr->set_top(new_top);
+    } else {
+      // not last one
+      assert(new_top > hr->end(), "new_top should be above this region");
+      hr->set_top(hr->end());
+    }
+  }
+  // If we have continues humongous regions (hr != NULL), then the
+  // end of the last one should match new_end and its top should
+  // match new_top.
+  assert(hr == NULL ||
+         (hr->end() == new_end && hr->top() == new_top), "sanity");
+
+  assert(first_hr->used() == word_size * HeapWordSize, "invariant");
+  _summary_bytes_used += first_hr->used();
+  _humongous_set.add(first_hr);
+
+  return new_obj;
 }

 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
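The new humongous_obj_allocate_initialize_regions() above is the block that used to live inline in humongous_obj_allocate() (deleted in the next hunk), promoted to its own method. Its correctness hinges on publication order: top must not become non-empty until the klass word is zeroed and the BOT is initialized. Stripped of the region bookkeeping, the protocol is these four ordered steps (a condensed restatement of the code above, not a replacement for it):

    // 1. Zero the klass word, so a refinement thread that races ahead sees
    //    klass == 0 and bails out instead of misparsing the object.
    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
    // 2. Initialize the BOT across the whole series; judging by the later
    //    assert on first_hr->end(), this also extends the first region's
    //    end to new_end.
    first_hr->set_startsHumongous(new_top, new_end);
    // 3. StoreStore barrier: steps 1-2 must become visible before top does.
    OrderAccess::storestore();
    // 4. Publish: once top > bottom, [bottom, top) is scannable.
    first_hr->set_top(new_top);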
@@ -651,125 +774,20 @@
         assert(first != -1, "this should have worked");
       }
     }
   }

+  HeapWord* result = NULL;
   if (first != -1) {
-    // Index of last region in the series + 1.
-    int last = first + (int) num_regions;
-
-    // We need to initialize the region(s) we just discovered. This is
-    // a bit tricky given that it can happen concurrently with
-    // refinement threads refining cards on these regions and
-    // potentially wanting to refine the BOT as they are scanning
-    // those cards (this can happen shortly after a cleanup; see CR
-    // 6991377). So we have to set up the region(s) carefully and in
-    // a specific order.
-
-    // The word size sum of all the regions we will allocate.
-    size_t word_size_sum = num_regions * HeapRegion::GrainWords;
-    assert(word_size <= word_size_sum, "sanity");
-
-    // This will be the "starts humongous" region.
-    HeapRegion* first_hr = _hrs->at(first);
-    // The header of the new object will be placed at the bottom of
-    // the first region.
-    HeapWord* new_obj = first_hr->bottom();
-    // This will be the new end of the first region in the series that
-    // should also match the end of the last region in the series.
-    HeapWord* new_end = new_obj + word_size_sum;
-    // This will be the new top of the first region that will reflect
-    // this allocation.
-    HeapWord* new_top = new_obj + word_size;
-
-    // First, we need to zero the header of the space that we will be
-    // allocating. When we update top further down, some refinement
-    // threads might try to scan the region. By zeroing the header we
-    // ensure that any thread that will try to scan the region will
-    // come across the zero klass word and bail out.
-    //
-    // NOTE: It would not have been correct to have used
-    // CollectedHeap::fill_with_object() and make the space look like
-    // an int array. The thread that is doing the allocation will
-    // later update the object header to a potentially different array
-    // type and, for a very short period of time, the klass and length
-    // fields will be inconsistent. This could cause a refinement
-    // thread to calculate the object size incorrectly.
-    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
-
-    // We will set up the first region as "starts humongous". This
-    // will also update the BOT covering all the regions to reflect
-    // that there is a single object that starts at the bottom of the
-    // first region.
-    first_hr->set_startsHumongous(new_top, new_end);
-
-    // Then, if there are any, we will set up the "continues
-    // humongous" regions.
-    HeapRegion* hr = NULL;
-    for (int i = first + 1; i < last; ++i) {
-      hr = _hrs->at(i);
-      hr->set_continuesHumongous(first_hr);
-    }
-    // If we have "continues humongous" regions (hr != NULL), then the
-    // end of the last one should match new_end.
-    assert(hr == NULL || hr->end() == new_end, "sanity");
-
-    // Up to this point no concurrent thread would have been able to
-    // do any scanning on any region in this series. All the top
-    // fields still point to bottom, so the intersection between
-    // [bottom,top] and [card_start,card_end] will be empty. Before we
-    // update the top fields, we'll do a storestore to make sure that
-    // no thread sees the update to top before the zeroing of the
-    // object header and the BOT initialization.
-    OrderAccess::storestore();
-
-    // Now that the BOT and the object header have been initialized,
-    // we can update top of the "starts humongous" region.
-    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
-           "new_top should be in this region");
-    first_hr->set_top(new_top);
-
-    // Now, we will update the top fields of the "continues humongous"
-    // regions. The reason we need to do this is that, otherwise,
-    // these regions would look empty and this will confuse parts of
-    // G1. For example, the code that looks for a consecutive number
-    // of empty regions will consider them empty and try to
-    // re-allocate them. We can extend is_empty() to also include
-    // !continuesHumongous(), but it is easier to just update the top
-    // fields here. The way we set top for all regions (i.e., top ==
-    // end for all regions but the last one, top == new_top for the
-    // last one) is actually used when we will free up the humongous
-    // region in free_humongous_region().
-    hr = NULL;
-    for (int i = first + 1; i < last; ++i) {
-      hr = _hrs->at(i);
-      if ((i + 1) == last) {
-        // last continues humongous region
-        assert(hr->bottom() < new_top && new_top <= hr->end(),
-               "new_top should fall on this region");
-        hr->set_top(new_top);
-      } else {
-        // not last one
-        assert(new_top > hr->end(), "new_top should be above this region");
-        hr->set_top(hr->end());
-      }
-    }
-    // If we have continues humongous regions (hr != NULL), then the
-    // end of the last one should match new_end and its top should
-    // match new_top.
-    assert(hr == NULL ||
-           (hr->end() == new_end && hr->top() == new_top), "sanity");
-
-    assert(first_hr->used() == word_size * HeapWordSize, "invariant");
-    _summary_bytes_used += first_hr->used();
-    _humongous_set.add(first_hr);
-
-    return new_obj;
+    result =
+      humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
+    assert(result != NULL, "it should always return a valid result");
   }

   verify_region_sets_optional();
-  return NULL;
+
+  return result;
 }

 void
 G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
   // Other threads might still be trying to allocate using CASes out
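After the refactoring the caller shrinks to a single call plus an assert. To make the geometry that the deleted block (and the new method) sets up concrete, a worked example with illustrative numbers, GrainWords = 131072 as before and word_size = 300000:

    //   num_regions = 3, word_size_sum = 393216
    //   new_obj = first_hr->bottom()
    //   new_end = new_obj + 393216   // end of region first+2
    //   new_top = new_obj + 300000   // 37856 words into region first+2
    //
    // After initialization:
    //   first     startsHumongous     end = new_end, top = new_top
    //   first+1   continuesHumongous  top = end (so it never looks empty)
    //   first+2   continuesHumongous  top = new_top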
@@ -1387,11 +1405,11 @@

   double start = os::elapsedTime();
   g1_policy()->record_full_collection_start();

   wait_while_free_regions_coming();
-  append_secondary_free_list_if_not_empty();
+  append_secondary_free_list_if_not_empty_with_lock();

   gc_prologue(true);
   increment_total_collections(true /* full gc */);

   size_t g1h_prev_used = used();
@@ -3375,19 +3393,18 @@
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);

     TraceMemoryManagerStats tms(false /* fullGC */);

-    // If there are any free regions available on the secondary_free_list
-    // make sure we append them to the free_list. However, we don't
-    // have to wait for the rest of the cleanup operation to
-    // finish. If it's still going on that's OK. If we run out of
-    // regions, the region allocation code will check the
-    // secondary_free_list and potentially wait if more free regions
-    // are coming (see new_region_try_secondary_free_list()).
+    // If the secondary_free_list is not empty, append it to the
+    // free_list. No need to wait for the cleanup operation to finish;
+    // the region allocation code will check the secondary_free_list
+    // and wait if necessary. If the G1StressConcRegionFreeing flag is
+    // set, skip this step so that the region allocation code has to
+    // get entries from the secondary_free_list.
     if (!G1StressConcRegionFreeing) {
-      append_secondary_free_list_if_not_empty();
+      append_secondary_free_list_if_not_empty_with_lock();
     }

     increment_gc_time_stamp();

     if (g1_policy()->in_young_gc_mode()) {
@@ -5197,11 +5214,11 @@
   HeapRegion* cur = cs_head;
   int age_bound = -1;
   size_t rs_lengths = 0;

   while (cur != NULL) {
-    assert(!is_on_free_list(cur), "sanity");
+    assert(!is_on_master_free_list(cur), "sanity");

     if (non_young) {
       if (cur->is_young()) {
         double end_sec = os::elapsedTime();
         double elapsed_ms = (end_sec - start_sec) * 1000.0;
@@ -5541,17 +5558,14 @@
   // any issues that we would like to catch during testing.
   if (free_regions_coming()) {
     return;
   }

-  {
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    // Make sure we append the secondary_free_list on the free_list so
-    // that all free regions we will come across can be safely
-    // attributed to the free_list.
-    append_secondary_free_list();
-  }
+  // Make sure we append the secondary_free_list on the free_list so
+  // that all free regions we will come across can be safely
+  // attributed to the free_list.
+  append_secondary_free_list_if_not_empty_with_lock();

   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
   _humongous_set.verify_start();
   _free_list.verify_start();
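This final hunk shows what the repeated append_secondary_free_list_if_not_empty_with_lock() calls replace: callers used to take SecondaryFreeList_lock themselves and append unconditionally. The new helper's body is not shown in this file, but it presumably bundles the lock with the emptiness check, along these lines (a sketch, not the actual declaration):

    void G1CollectedHeap::append_secondary_free_list_if_not_empty_with_lock() {
      // Take the lock before testing, so the emptiness check and the append
      // are atomic with respect to the concurrent cleanup thread.
      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
      if (!_secondary_free_list.is_empty()) {
        append_secondary_free_list();
      }
    }

One such helper accounts for the rename at the three earlier call sites and lets this verification path drop its explicit MutexLockerEx block.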