graal-jvmci-8: comparison of src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 22936:fb69749583e8

8072621: Clean up around VM_GC_Operations
Reviewed-by: brutisso, jmasa

author:   mlarsson
date:     Thu, 09 Apr 2015 15:58:49 +0200
parents:  c04f46b4abe4
children: 33e421924c67
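The substance of this change, repeated through every hunk below, is a type cleanup: GC counters and retry counts previously declared as unsigned int (or, for the GCLocker retry count, plain int) now uniformly use HotSpot's uint alias. Since uint is simply unsigned int (supplied by a platform typedef in HotSpot's globalDefinitions headers on toolchains that lack it), the change is about consistent spelling and signedness, not behavior. A minimal sketch of the idea follows; the function and its names are hypothetical, not part of the patch:

    #include <cstdio>

    // 'uint' is HotSpot's shorthand for 'unsigned int'.
    typedef unsigned int uint;

    // Hypothetical example (not from the patch): with both out-parameters
    // on the same unsigned alias, a sampled GC count and a retry count
    // travel together without signed/unsigned mixing (the old code paired
    // 'unsigned int*' with 'int*').
    static void sample_counts(uint* gc_count_before_ret,
                              uint* gclocker_retry_count_ret) {
      *gc_count_before_ret = 42;       // e.g. the current collection count
      *gclocker_retry_count_ret = 0;   // retry counter starts at zero
    }

    int main() {
      uint gc_count_before, gclocker_retry_count;
      sample_counts(&gc_count_before, &gclocker_retry_count);
      printf("%u %u\n", gc_count_before, gclocker_retry_count);
      return 0;
    }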
--- 22935:bff23dedb306
+++ 22936:fb69749583e8
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -351,11 +351,11 @@
 
 void YoungList::print() {
   HeapRegion* lists[] = {_head, _survivor_head};
   const char* names[] = {"YOUNG", "SURVIVOR"};
 
-  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
+  for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
     HeapRegion *curr = lists[list];
     if (curr == NULL)
       gclog_or_tty->print_cr(" empty");
     while (curr != NULL) {
@@ -825,23 +825,23 @@
 
 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 
-  unsigned int dummy_gc_count_before;
-  int dummy_gclocker_retry_count = 0;
+  uint dummy_gc_count_before;
+  uint dummy_gclocker_retry_count = 0;
   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 }
 
 HeapWord*
 G1CollectedHeap::mem_allocate(size_t word_size,
                               bool* gc_overhead_limit_was_exceeded) {
   assert_heap_not_locked_and_not_at_safepoint();
 
   // Loop until the allocation is satisfied, or unsatisfied after GC.
-  for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
-    unsigned int gc_count_before;
+  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
+    uint gc_count_before;
 
     HeapWord* result = NULL;
     if (!isHumongous(word_size)) {
       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
     } else {
@@ -889,12 +889,12 @@
   return NULL;
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                                    AllocationContext_t context,
-                                                   unsigned int *gc_count_before_ret,
-                                                   int* gclocker_retry_count_ret) {
+                                                   uint* gc_count_before_ret,
+                                                   uint* gclocker_retry_count_ret) {
   // Make sure you read the note in attempt_allocation_humongous().
 
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
          "be called for humongous allocation requests");
@@ -907,11 +907,11 @@
   // fails to perform the allocation. b) is the only case when we'll
   // return NULL.
   HeapWord* result = NULL;
   for (int try_count = 1; /* we'll return */; try_count += 1) {
     bool should_try_gc;
-    unsigned int gc_count_before;
+    uint gc_count_before;
 
     {
       MutexLockerEx x(Heap_lock);
       result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                     false /* bot_updates */);
@@ -951,11 +951,11 @@
   }
 
   if (should_try_gc) {
     bool succeeded;
     result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                  GCCause::_g1_inc_collection_pause);
     if (result != NULL) {
       assert(succeeded, "only way to get back a non-NULL result");
       return result;
     }
 
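The context above (and its humongous twin below) shows the retry structure these counters feed: sample the collection count, try to allocate, pause if that fails, and only give up when a pause that did run still leaves no room. A rough, self-contained C++ sketch of that shape follows; g_gc_count, g_free_space, try_allocate_locked and collection_pause are illustrative stand-ins, not the HotSpot API:

    #include <cstdio>
    #include <cstdlib>

    typedef unsigned int uint;
    typedef char HeapWord;             // stand-in for HotSpot's HeapWord

    // Illustrative globals standing in for heap state.
    static uint g_gc_count = 0;        // bumped by every pause
    static bool g_free_space = false;  // becomes true after a pause

    static HeapWord* try_allocate_locked(size_t word_size) {
      // Pretend the fast path only succeeds once a GC has freed space.
      return g_free_space ? (HeapWord*)malloc(word_size) : NULL;
    }

    static HeapWord* collection_pause(size_t word_size, uint gc_count_before,
                                      bool* succeeded) {
      (void)word_size; (void)gc_count_before;
      g_gc_count += 1;                 // a pause bumps the collection count
      g_free_space = true;             // ...and, in this model, frees space
      *succeeded = true;
      return NULL;                     // let the caller retry the normal path
    }

    // Simplified shape of attempt_allocation_slow(): alternate a locked
    // allocation attempt with a collection pause until one satisfies the
    // request, or a pause that did run still could not make room.
    static HeapWord* attempt_allocation_slow_sketch(size_t word_size) {
      for (uint try_count = 1; /* we'll return */; try_count += 1) {
        uint gc_count_before = g_gc_count;  // sampled before pausing
        HeapWord* result = try_allocate_locked(word_size);
        if (result != NULL) return result;

        bool succeeded;
        result = collection_pause(word_size, gc_count_before, &succeeded);
        if (result != NULL) return result;  // the pause satisfied the request
        if (!succeeded) return NULL;        // GC could not make room: fail
        // Otherwise loop: a GC ran, so the next attempt may succeed.
      }
    }

    int main() {
      HeapWord* p = attempt_allocation_slow_sketch(64);
      printf("allocated: %s\n", p != NULL ? "yes" : "no");
      free(p);
      return 0;
    }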
@@ -1005,12 +1005,12 @@
   ShouldNotReachHere();
   return NULL;
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                                        unsigned int * gc_count_before_ret,
-                                                        int* gclocker_retry_count_ret) {
+                                                        uint* gc_count_before_ret,
+                                                        uint* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
   // allocation is not humongous do this, otherwise do that"
   // conditional paths which would obscure its flow. In fact, an early
@@ -1039,11 +1039,11 @@
   // fails to perform the allocation. b) is the only case when we'll
   // return NULL.
   HeapWord* result = NULL;
   for (int try_count = 1; /* we'll return */; try_count += 1) {
     bool should_try_gc;
-    unsigned int gc_count_before;
+    uint gc_count_before;
 
     {
       MutexLockerEx x(Heap_lock);
 
       // Given that humongous objects are not allocated in young
@@ -1077,11 +1077,11 @@
   // do a collection pause (if we're allowed) in case it reclaims
   // enough space for the allocation to succeed after the pause.
 
   bool succeeded;
   result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                GCCause::_g1_humongous_allocation);
   if (result != NULL) {
     assert(succeeded, "only way to get back a non-NULL result");
     return result;
   }
 
@@ -1885,11 +1885,11 @@
 
   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
   assert(n_rem_sets > 0, "Invariant.");
 
   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
-  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
+  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
 
   for (int i = 0; i < n_queues; i++) {
     RefToScanQueue* q = new RefToScanQueue();
     q->initialize();
@@ -2471,13 +2471,13 @@
 }
 
 void G1CollectedHeap::collect(GCCause::Cause cause) {
   assert_heap_not_locked();
 
-  unsigned int gc_count_before;
-  unsigned int old_marking_count_before;
-  unsigned int full_gc_count_before;
+  uint gc_count_before;
+  uint old_marking_count_before;
+  uint full_gc_count_before;
   bool retry_gc;
 
   do {
     retry_gc = false;
 
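The counters declared here are the link to the VM_GC_Operations named in the bug title: collect() samples them and hands them to a VM operation, which re-checks the count at the safepoint and turns into a no-op if another request already triggered the GC in the meantime. A small self-contained model of that handshake; the class and its names are illustrative, not HotSpot's:

    #include <cstdio>

    typedef unsigned int uint;

    static uint g_total_collections = 0;  // stand-in for total_collections()

    // Illustrative model of a VM_GC_Operation-style request carrying the
    // collection count sampled when the request was made.
    struct GCOperationSketch {
      uint _gc_count_before;
      explicit GCOperationSketch(uint gc_count_before)
        : _gc_count_before(gc_count_before) {}

      // At the safepoint: skip if some other request already ran a GC
      // since the counter was sampled.
      bool skip_operation() const {
        return g_total_collections != _gc_count_before;
      }

      void doit() {
        if (skip_operation()) {
          printf("GC already done, skipping\n");
          return;
        }
        g_total_collections += 1;
        printf("running GC #%u\n", g_total_collections);
      }
    };

    int main() {
      uint gc_count_before = g_total_collections;  // both requests sample here
      GCOperationSketch first(gc_count_before);
      GCOperationSketch second(gc_count_before);
      first.doit();   // runs the GC and bumps the counter
      second.doit();  // sees a newer count and skips
      return 0;
    }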
@@ -3611,11 +3611,11 @@
   // policy with the new heap occupancy
   Universe::update_heap_info_at_gc();
 }
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
-                                               unsigned int gc_count_before,
+                                               uint gc_count_before,
                                                bool* succeeded,
                                                GCCause::Cause gc_cause) {
   assert_heap_not_locked_and_not_at_safepoint();
   g1_policy()->record_stop_world_start();
   VM_G1IncCollectionPause op(gc_count_before,