Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 17773:8ee855b4e667
8036025: Sort the freelist in order to shrink the heap
Summary: The free list is maintained in sorted order; old and humongous regions are allocated from the bottom of the heap, while young regions are allocated at the top.
Reviewed-by: tschatzl, mgerdin
Contributed-by: jesper.wilhelmsson@oracle.com, staffan.friberg@oracle.com
author | jwilhelm |
---|---|
date | Fri, 28 Feb 2014 15:27:09 +0100 |
parents | bc22cbb8b45a |
children | 2775f322649a |
comparison
equal
deleted
inserted
replaced
17763:6e7e363c5a8f | 17773:8ee855b4e667 |
---|---|
514 G1CollectedHeap* G1CollectedHeap::_g1h; | 514 G1CollectedHeap* G1CollectedHeap::_g1h; |
515 | 515 |
516 // Private methods. | 516 // Private methods. |
517 | 517 |
518 HeapRegion* | 518 HeapRegion* |
519 G1CollectedHeap::new_region_try_secondary_free_list() { | 519 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) { |
520 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | 520 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
521 while (!_secondary_free_list.is_empty() || free_regions_coming()) { | 521 while (!_secondary_free_list.is_empty() || free_regions_coming()) { |
522 if (!_secondary_free_list.is_empty()) { | 522 if (!_secondary_free_list.is_empty()) { |
523 if (G1ConcRegionFreeingVerbose) { | 523 if (G1ConcRegionFreeingVerbose) { |
524 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | 524 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " |
530 // again to allocate from it. | 530 // again to allocate from it. |
531 append_secondary_free_list(); | 531 append_secondary_free_list(); |
532 | 532 |
533 assert(!_free_list.is_empty(), "if the secondary_free_list was not " | 533 assert(!_free_list.is_empty(), "if the secondary_free_list was not " |
534 "empty we should have moved at least one entry to the free_list"); | 534 "empty we should have moved at least one entry to the free_list"); |
535 HeapRegion* res = _free_list.remove_head(); | 535 HeapRegion* res = _free_list.remove_region(is_old); |
536 if (G1ConcRegionFreeingVerbose) { | 536 if (G1ConcRegionFreeingVerbose) { |
537 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | 537 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " |
538 "allocated "HR_FORMAT" from secondary_free_list", | 538 "allocated "HR_FORMAT" from secondary_free_list", |
539 HR_FORMAT_PARAMS(res)); | 539 HR_FORMAT_PARAMS(res)); |
540 } | 540 } |
552 "could not allocate from secondary_free_list"); | 552 "could not allocate from secondary_free_list"); |
553 } | 553 } |
554 return NULL; | 554 return NULL; |
555 } | 555 } |
556 | 556 |
557 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) { | 557 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) { |
558 assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords, | 558 assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords, |
559 "the only time we use this to allocate a humongous region is " | 559 "the only time we use this to allocate a humongous region is " |
560 "when we are allocating a single humongous region"); | 560 "when we are allocating a single humongous region"); |
561 | 561 |
562 HeapRegion* res; | 562 HeapRegion* res; |
564 if (!_secondary_free_list.is_empty()) { | 564 if (!_secondary_free_list.is_empty()) { |
565 if (G1ConcRegionFreeingVerbose) { | 565 if (G1ConcRegionFreeingVerbose) { |
566 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | 566 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " |
567 "forced to look at the secondary_free_list"); | 567 "forced to look at the secondary_free_list"); |
568 } | 568 } |
569 res = new_region_try_secondary_free_list(); | 569 res = new_region_try_secondary_free_list(is_old); |
570 if (res != NULL) { | 570 if (res != NULL) { |
571 return res; | 571 return res; |
572 } | 572 } |
573 } | 573 } |
574 } | 574 } |
575 res = _free_list.remove_head_or_null(); | 575 |
576 res = _free_list.remove_region(is_old); | |
577 | |
576 if (res == NULL) { | 578 if (res == NULL) { |
577 if (G1ConcRegionFreeingVerbose) { | 579 if (G1ConcRegionFreeingVerbose) { |
578 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | 580 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " |
579 "res == NULL, trying the secondary_free_list"); | 581 "res == NULL, trying the secondary_free_list"); |
580 } | 582 } |
581 res = new_region_try_secondary_free_list(); | 583 res = new_region_try_secondary_free_list(is_old); |
582 } | 584 } |
583 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) { | 585 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) { |
584 // Currently, only attempts to allocate GC alloc regions set | 586 // Currently, only attempts to allocate GC alloc regions set |
585 // do_expand to true. So, we should only reach here during a | 587 // do_expand to true. So, we should only reach here during a |
586 // safepoint. If this assumption changes we might have to | 588 // safepoint. If this assumption changes we might have to |
593 ergo_format_byte("allocation request"), | 595 ergo_format_byte("allocation request"), |
594 word_size * HeapWordSize); | 596 word_size * HeapWordSize); |
595 if (expand(word_size * HeapWordSize)) { | 597 if (expand(word_size * HeapWordSize)) { |
596 // Given that expand() succeeded in expanding the heap, and we | 598 // Given that expand() succeeded in expanding the heap, and we |
597 // always expand the heap by an amount aligned to the heap | 599 // always expand the heap by an amount aligned to the heap |
598 // region size, the free list should in theory not be empty. So | 600 // region size, the free list should in theory not be empty. |
599 // it would probably be OK to use remove_head(). But the extra | 601 // In either case remove_region() will check for NULL. |
600 // check for NULL is unlikely to be a performance issue here (we | 602 res = _free_list.remove_region(is_old); |
601 // just expanded the heap!) so let's just be conservative and | |
602 // use remove_head_or_null(). | |
603 res = _free_list.remove_head_or_null(); | |
604 } else { | 603 } else { |
605 _expand_heap_after_alloc_failure = false; | 604 _expand_heap_after_alloc_failure = false; |
606 } | 605 } |
607 } | 606 } |
608 return res; | 607 return res; |
616 uint first = G1_NULL_HRS_INDEX; | 615 uint first = G1_NULL_HRS_INDEX; |
617 if (num_regions == 1) { | 616 if (num_regions == 1) { |
618 // Only one region to allocate, no need to go through the slower | 617 // Only one region to allocate, no need to go through the slower |
619 // path. The caller will attempt the expansion if this fails, so | 618 // path. The caller will attempt the expansion if this fails, so |
620 // let's not try to expand here too. | 619 // let's not try to expand here too. |
621 HeapRegion* hr = new_region(word_size, false /* do_expand */); | 620 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */); |
622 if (hr != NULL) { | 621 if (hr != NULL) { |
623 first = hr->hrs_index(); | 622 first = hr->hrs_index(); |
624 } else { | 623 } else { |
625 first = G1_NULL_HRS_INDEX; | 624 first = G1_NULL_HRS_INDEX; |
626 } | 625 } |
5935 // (since we don't refine cards in young regions). | 5934 // (since we don't refine cards in young regions). |
5936 if (!hr->is_young()) { | 5935 if (!hr->is_young()) { |
5937 _cg1r->hot_card_cache()->reset_card_counts(hr); | 5936 _cg1r->hot_card_cache()->reset_card_counts(hr); |
5938 } | 5937 } |
5939 hr->hr_clear(par, true /* clear_space */, locked /* locked */); | 5938 hr->hr_clear(par, true /* clear_space */, locked /* locked */); |
5940 free_list->add_as_head(hr); | 5939 free_list->add_ordered(hr); |
5941 } | 5940 } |
5942 | 5941 |
5943 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, | 5942 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, |
5944 FreeRegionList* free_list, | 5943 FreeRegionList* free_list, |
5945 bool par) { | 5944 bool par) { |
5975 | 5974 |
5976 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) { | 5975 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) { |
5977 assert(list != NULL, "list can't be null"); | 5976 assert(list != NULL, "list can't be null"); |
5978 if (!list->is_empty()) { | 5977 if (!list->is_empty()) { |
5979 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | 5978 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); |
5980 _free_list.add_as_head(list); | 5979 _free_list.add_ordered(list); |
5981 } | 5980 } |
5982 } | 5981 } |
5983 | 5982 |
5984 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { | 5983 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { |
5985 assert(_summary_bytes_used >= bytes, | 5984 assert(_summary_bytes_used >= bytes, |
6441 assert(!force || g1_policy()->can_expand_young_list(), | 6440 assert(!force || g1_policy()->can_expand_young_list(), |
6442 "if force is true we should be able to expand the young list"); | 6441 "if force is true we should be able to expand the young list"); |
6443 bool young_list_full = g1_policy()->is_young_list_full(); | 6442 bool young_list_full = g1_policy()->is_young_list_full(); |
6444 if (force || !young_list_full) { | 6443 if (force || !young_list_full) { |
6445 HeapRegion* new_alloc_region = new_region(word_size, | 6444 HeapRegion* new_alloc_region = new_region(word_size, |
6445 false /* is_old */, | |
6446 false /* do_expand */); | 6446 false /* do_expand */); |
6447 if (new_alloc_region != NULL) { | 6447 if (new_alloc_region != NULL) { |
6448 set_region_short_lived_locked(new_alloc_region); | 6448 set_region_short_lived_locked(new_alloc_region); |
6449 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full); | 6449 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full); |
6450 return new_alloc_region; | 6450 return new_alloc_region; |
6499 uint count, | 6499 uint count, |
6500 GCAllocPurpose ap) { | 6500 GCAllocPurpose ap) { |
6501 assert(FreeList_lock->owned_by_self(), "pre-condition"); | 6501 assert(FreeList_lock->owned_by_self(), "pre-condition"); |
6502 | 6502 |
6503 if (count < g1_policy()->max_regions(ap)) { | 6503 if (count < g1_policy()->max_regions(ap)) { |
6504 bool survivor = (ap == GCAllocForSurvived); | |
6504 HeapRegion* new_alloc_region = new_region(word_size, | 6505 HeapRegion* new_alloc_region = new_region(word_size, |
6506 !survivor, | |
6505 true /* do_expand */); | 6507 true /* do_expand */); |
6506 if (new_alloc_region != NULL) { | 6508 if (new_alloc_region != NULL) { |
6507 // We really only need to do this for old regions given that we | 6509 // We really only need to do this for old regions given that we |
6508 // should never scan survivors. But it doesn't hurt to do it | 6510 // should never scan survivors. But it doesn't hurt to do it |
6509 // for survivors too. | 6511 // for survivors too. |
6510 new_alloc_region->set_saved_mark(); | 6512 new_alloc_region->set_saved_mark(); |
6511 if (ap == GCAllocForSurvived) { | 6513 if (survivor) { |
6512 new_alloc_region->set_survivor(); | 6514 new_alloc_region->set_survivor(); |
6513 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor); | 6515 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor); |
6514 } else { | 6516 } else { |
6515 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old); | 6517 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old); |
6516 } | 6518 } |