Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/collectionSetChooser.cpp @ 19083:09292c24d555
LSStackSlotAllocator: hide inner class.
author | Josef Eisl <josef.eisl@jku.at> |
---|---|
date | Sat, 31 Jan 2015 11:07:15 +0100 |
parents | 89152779163c |
children |
rev | line source |
---|---|
342 | 1 /* |
17524 | 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |
20 * or visit www.oracle.com if you need additional information or have any | |
21 * questions. | |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "gc_implementation/g1/collectionSetChooser.hpp" | |
27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" | |
28 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | |
3914
20213c8a3c40
7050392: G1: Introduce flag to generate a log of the G1 ergonomic decisions
tonyp
parents:
2435
diff
changeset
|
29 #include "gc_implementation/g1/g1ErgoVerbose.hpp" |
1972 | 30 #include "memory/space.inline.hpp" |
342 | 31 |
4912
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
32 // Even though we don't use the GC efficiency in our heuristics as |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
33 // much as we used to, we still order according to GC efficiency. This |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
34 // will cause regions with a lot of live objects and large RSets to |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
35 // end up at the end of the array. Given that we might skip collecting |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
36 // the last few old regions, if after a few mixed GCs the remaining |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
37 // have reclaimable bytes under a certain threshold, the hope is that |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
38 // the ones we'll skip are ones with both large RSets and a lot of |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
39 // live objects, not the ones with just a lot of live objects if we |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
40 // ordered according to the amount of reclaimable bytes per region. |
6011 | 41 static int order_regions(HeapRegion* hr1, HeapRegion* hr2) { |
342 | 42 if (hr1 == NULL) { |
4912
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
43 if (hr2 == NULL) { |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
44 return 0; |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
45 } else { |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
46 return 1; |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
47 } |
342 | 48 } else if (hr2 == NULL) { |
49 return -1; | |
50 } | |
4912
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
51 |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
52 double gc_eff1 = hr1->gc_efficiency(); |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
53 double gc_eff2 = hr2->gc_efficiency(); |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
54 if (gc_eff1 > gc_eff2) { |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
55 return -1; |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
56 } if (gc_eff1 < gc_eff2) { |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
57 return 1; |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
58 } else { |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
59 return 0; |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
60 } |
342 | 61 } |
62 | |
6011 | 63 static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) { |
64 return order_regions(*hr1p, *hr2p); | |
342 | 65 } |
66 | |
// Construct an empty chooser. The growable array's element storage must
// live on the C heap (the chooser itself is C-heap allocated), hence the
// set_allocation_type() trick in the initializer list below.
CollectionSetChooser::CollectionSetChooser() :
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
  // allow to pass the assert in GenericGrowableArray() which checks
  // that a growable array object must be on C heap if elements are.
  //
  // Note: containing object is allocated on C heap since it is CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address) &_regions,
                                             ResourceObj::C_HEAP),
            100), true /* C_Heap */),
  _curr_index(0), _length(0), _first_par_unreserved_idx(0),
  _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
  // A region is a mixed-GC candidate only while its live data stays under
  // G1MixedGCLiveThresholdPercent of the region size (GrainBytes).
  _region_live_threshold_bytes =
    HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
}
342 | 90 |
#ifndef PRODUCT
// Debug-only consistency check of the candidate array:
//  - entries before _curr_index have been consumed and must be NULL;
//  - the remaining entries are non-NULL, non-young, non-humongous,
//    and sorted by non-increasing GC efficiency (see order_regions);
//  - their reclaimable bytes sum to _remaining_reclaimable_bytes.
void CollectionSetChooser::verify() {
  guarantee(_length <= regions_length(),
            err_msg("_length: %u regions length: %u", _length, regions_length()));
  guarantee(_curr_index <= _length,
            err_msg("_curr_index: %u _length: %u", _curr_index, _length));
  uint index = 0;
  size_t sum_of_reclaimable_bytes = 0;
  while (index < _curr_index) {
    guarantee(regions_at(index) == NULL,
              "all entries before _curr_index should be NULL");
    index += 1;
  }
  HeapRegion *prev = NULL;
  while (index < _length) {
    HeapRegion *curr = regions_at(index++);
    guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
    guarantee(!curr->is_young(), "should not be young!");
    guarantee(!curr->isHumongous(), "should not be humongous!");
    if (prev != NULL) {
      // order_regions() == 1 would mean prev should sort after curr,
      // i.e. the array is out of order.
      guarantee(order_regions(prev, curr) != 1,
                err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
                        prev->gc_efficiency(), curr->gc_efficiency()));
    }
    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
    prev = curr;
  }
  guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
            err_msg("reclaimable bytes inconsistent, "
                    "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
                    _remaining_reclaimable_bytes, sum_of_reclaimable_bytes));
}
#endif // !PRODUCT
4912
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
124 |
// Sort the candidate array with order_regions() (best GC efficiency
// first), after trimming any slots that were reserved for parallel
// addition but never filled. Optionally dumps per-region liveness
// info, then runs the debug-only verify().
void CollectionSetChooser::sort_regions() {
  // First trim any unused portion of the top in the parallel case.
  if (_first_par_unreserved_idx > 0) {
    assert(_first_par_unreserved_idx <= regions_length(),
           "Or we didn't reserved enough length");
    regions_trunc_to(_first_par_unreserved_idx);
  }
  _regions.sort(order_regions);
  assert(_length <= regions_length(), "Requirement");
#ifdef ASSERT
  // NULL entries (unclaimed parallel slots) sort to the tail and were
  // truncated above, so every remaining entry must be non-NULL.
  for (uint i = 0; i < _length; i++) {
    assert(regions_at(i) != NULL, "Should be true by sorting!");
  }
#endif // ASSERT
  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
    for (uint i = 0; i < _length; ++i) {
      HeapRegion* r = regions_at(i);
      cl.doHeapRegion(r);
    }
  }
  verify();
}
148 | |
4912
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
149 |
// Serially append one old-generation candidate region, updating the
// running reclaimable-bytes total and (re)computing the region's GC
// efficiency so it can be sorted later.
void CollectionSetChooser::add_region(HeapRegion* hr) {
  assert(!hr->isHumongous(),
         "Humongous regions shouldn't be added to the collection set");
  assert(!hr->is_young(), "should not be young!");
  _regions.append(hr);
  _length++;
  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
  hr->calc_gc_efficiency();
}
159 | |
// Pre-size the candidate array so that n_threads GC workers can claim
// chunk_size-slot chunks concurrently (see claim_array_chunk) without
// growing the array under a lock. The array is grown to cover the
// chunk-aligned region count plus the worst-case per-thread waste.
void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions,
                                                           uint chunk_size) {
  _first_par_unreserved_idx = 0;
  uint n_threads = (uint) ParallelGCThreads;
  if (UseDynamicNumberOfGCThreads) {
    assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
           "Should have been set earlier");
    // This is defensive code. As the assertion above says, the number
    // of active threads should be > 0, but in case there is some path
    // or some improperly initialized variable with leads to no
    // active threads, protect against that in a product build.
    n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
                     1U);
  }
  // Each thread may leave up to one partially-filled chunk.
  uint max_waste = n_threads * chunk_size;
  // it should be aligned with respect to chunk_size
  uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
  assert(aligned_n_regions % chunk_size == 0, "should be aligned");
  // Grow the array (filling new slots with NULL) up to the last index
  // any thread could touch.
  regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
}
180 | |
// Atomically claim a chunk_size-slot chunk of the candidate array for
// one GC worker. Bumps _first_par_unreserved_idx by chunk_size; since
// Atomic::add yields the post-add value, res - chunk_size is the first
// index of the claimed chunk, which is returned.
uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
  uint res = (uint) Atomic::add((jint) chunk_size,
                                (volatile jint*) &_first_par_unreserved_idx);
  assert(regions_length() > res + chunk_size - 1,
         "Should already have been expanded");
  return res - chunk_size;
}
188 | |
// Fill one previously-claimed (and therefore still-NULL) slot with a
// candidate region, computing its GC efficiency for the later sort.
// Totals are updated separately via update_totals().
void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
  assert(regions_at(index) == NULL, "precondition");
  assert(!hr->is_young(), "should not be young!");
  regions_at_put(index, hr);
  hr->calc_gc_efficiency();
}
195 | |
// Fold one worker's contribution (regions added and their reclaimable
// bytes) into the shared totals under ParGCRareEvent_lock. A zero
// region count must come with zero bytes and skips the lock entirely.
void CollectionSetChooser::update_totals(uint region_num,
                                         size_t reclaimable_bytes) {
  // Only take the lock if we actually need to update the totals.
  if (region_num > 0) {
    assert(reclaimable_bytes > 0, "invariant");
    // We could have just used atomics instead of taking the
    // lock. However, we currently don't have an atomic add for size_t.
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    _length += region_num;
    _remaining_reclaimable_bytes += reclaimable_bytes;
  } else {
    assert(reclaimable_bytes == 0, "invariant");
  }
}
210 | |
6011 | 211 void CollectionSetChooser::clear() { |
212 _regions.clear(); | |
4912
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
213 _curr_index = 0; |
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
214 _length = 0; |
6011 | 215 _remaining_reclaimable_bytes = 0; |
4912
a9647476d1a4
7132029: G1: mixed GC phase lasts for longer than it should
tonyp
parents:
4728
diff
changeset
|
216 }; |