Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 1717:688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
Summary: There are a few issues in the code that calculates whether to resize the heap and by how much: a) some calculations can overflow 32-bit size_t's, b) min_desired_capacity is not bounded by the max heap size, and c) the assert that fires is in the wrong place. The fix also includes some tidying up of the related verbose code.
Reviewed-by: ysr, jmasa
author | tonyp |
---|---|
date | Tue, 17 Aug 2010 14:40:00 -0400 |
parents | 5f429ee79634 |
children | bb847e31b836 |
rev | line source |
---|---|
342 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_g1CollectedHeap.cpp.incl" | |
27 | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
28 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
29 |
342 | 30 // turn it on so that the contents of the young list (scan-only / |
31 // to-be-collected) are printed at "strategic" points before / during | |
32 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
33 #define YOUNG_LIST_VERBOSE 0 |
342 | 34 // CURRENT STATUS |
35 // This file is under construction. Search for "FIXME". | |
36 | |
37 // INVARIANTS/NOTES | |
38 // | |
39 // All allocation activity covered by the G1CollectedHeap interface is | |
40 // serialized by acquiring the HeapLock. This happens in | |
41 // mem_allocate_work, which all such allocation functions call. | |
42 // (Note that this does not apply to TLAB allocation, which is not part | |
43 // of this interface: it is done by clients of this interface.) | |
44 | |
45 // Local to this file. | |
46 | |
// Closure applied to dirty-card-queue entries: refines one card at a time
// via the G1 remembered set. Used by the concurrent refinement and mutator
// threads (not during an evacuation pause).
class RefineCardTableEntryClosure: public CardTableEntryClosure {
  SuspendibleThreadSet* _sts;   // used to check whether we should yield
  G1RemSet* _g1rs;              // performs the actual card refinement
  ConcurrentG1Refine* _cg1r;
  bool _concurrent;             // true while refinement runs concurrently
public:
  RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
                              G1RemSet* g1rs,
                              ConcurrentG1Refine* cg1r) :
    _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  {}
  // Refine one card; returns false to tell the caller to yield, true when
  // the entry was processed and iteration may continue.
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
    // This path is executed by the concurrent refine or mutator threads,
    // concurrently, and so we do not care if card_ptr contains references
    // that point into the collection set.
    assert(!oops_into_cset, "should be");

    if (_concurrent && _sts->should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
  void set_concurrent(bool b) { _concurrent = b; }
};
74 | |
75 | |
// Debug/verification closure: clears every logged card that lies in the
// reserved heap (setting it to -1, i.e. the clean value) while recording a
// histogram of the card values seen. Used by check_ct_logs_at_safepoint().
class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;                 // number of in-heap cards processed
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
  int _histo[256];            // histogram of raw card byte values
public:
  ClearLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
    for (int i = 0; i < 256; i++) _histo[i] = 0;
  }
  // Count and histogram the card, then clear it. Always returns true so the
  // iteration over the dirty card queues continues to completion.
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      unsigned char* ujb = (unsigned char*)card_ptr;
      int ind = (int)(*ujb);
      _histo[ind]++;
      *card_ptr = -1;   // -1 is the clean card value
    }
    return true;
  }
  int calls() { return _calls; }
  // Print the non-zero buckets of the card-value histogram.
  void print_histo() {
    gclog_or_tty->print_cr("Card table value histogram:");
    for (int i = 0; i < 256; i++) {
      if (_histo[i] != 0) {
        gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
      }
    }
  }
};
109 | |
// Debug/verification closure: re-dirties (sets to 0) every logged card in
// the reserved heap, counting how many were touched. The counterpart to
// ClearLoggedCardTableEntryClosure in check_ct_logs_at_safepoint().
class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;                 // number of in-heap cards re-dirtied
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
public:
  RedirtyLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
  }
  // Set the card back to dirty (0) if it maps into the reserved heap.
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      *card_ptr = 0;
    }
    return true;
  }
  int calls() { return _calls; }
};
130 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
// Fast variant of the redirty closure: unconditionally marks every logged
// card dirty, with no range check or counting. Intended for bulk use where
// all entries are known to be valid heap cards.
class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    return true;
  }
};
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
138 |
// Construct an empty young list (both the eden chain and the survivor
// chain start out empty) and verify the empty-list invariants.
YoungList::YoungList(G1CollectedHeap* g1h)
  : _g1h(g1h), _head(NULL),
    _length(0),
    _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
{
  // false: don't require _last_sampled_rs_lengths to be zero here.
  guarantee( check_list_empty(false), "just making sure..." );
}
147 | |
148 void YoungList::push_region(HeapRegion *hr) { | |
149 assert(!hr->is_young(), "should not already be young"); | |
150 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
151 | |
152 hr->set_next_young_region(_head); | |
153 _head = hr; | |
154 | |
155 hr->set_young(); | |
156 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
157 ++_length; | |
158 } | |
159 | |
160 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 161 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 162 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
163 | |
164 hr->set_next_young_region(_survivor_head); | |
165 if (_survivor_head == NULL) { | |
545 | 166 _survivor_tail = hr; |
342 | 167 } |
168 _survivor_head = hr; | |
169 | |
170 ++_survivor_length; | |
171 } | |
172 | |
173 void YoungList::empty_list(HeapRegion* list) { | |
174 while (list != NULL) { | |
175 HeapRegion* next = list->get_next_young_region(); | |
176 list->set_next_young_region(NULL); | |
177 list->uninstall_surv_rate_group(); | |
178 list->set_not_young(); | |
179 list = next; | |
180 } | |
181 } | |
182 | |
// Empty both the eden and survivor chains, resetting all counts and the
// last sampled RS length, and verify the list ends up empty.
void YoungList::empty_list() {
  assert(check_list_well_formed(), "young list should be well formed");

  empty_list(_head);
  _head = NULL;
  _length = 0;

  empty_list(_survivor_head);
  _survivor_head = NULL;
  _survivor_tail = NULL;
  _survivor_length = 0;

  _last_sampled_rs_lengths = 0;

  assert(check_list_empty(false), "just making sure...");
}
199 | |
200 bool YoungList::check_list_well_formed() { | |
201 bool ret = true; | |
202 | |
203 size_t length = 0; | |
204 HeapRegion* curr = _head; | |
205 HeapRegion* last = NULL; | |
206 while (curr != NULL) { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
207 if (!curr->is_young()) { |
342 | 208 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
209 "incorrectly tagged (y: %d, surv: %d)", |
342 | 210 curr->bottom(), curr->end(), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
211 curr->is_young(), curr->is_survivor()); |
342 | 212 ret = false; |
213 } | |
214 ++length; | |
215 last = curr; | |
216 curr = curr->get_next_young_region(); | |
217 } | |
218 ret = ret && (length == _length); | |
219 | |
220 if (!ret) { | |
221 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
222 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
223 length, _length); | |
224 } | |
225 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
226 return ret; |
342 | 227 } |
228 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
229 bool YoungList::check_list_empty(bool check_sample) { |
342 | 230 bool ret = true; |
231 | |
232 if (_length != 0) { | |
233 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
234 _length); | |
235 ret = false; | |
236 } | |
237 if (check_sample && _last_sampled_rs_lengths != 0) { | |
238 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
239 ret = false; | |
240 } | |
241 if (_head != NULL) { | |
242 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
243 ret = false; | |
244 } | |
245 if (!ret) { | |
246 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
247 } | |
248 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
249 return ret; |
342 | 250 } |
251 | |
// Begin a new remembered-set-length sampling pass over the young list:
// reset the accumulator and position the cursor at the head.
void
YoungList::rs_length_sampling_init() {
  _sampled_rs_lengths = 0;
  _curr = _head;
}
257 | |
// Returns true while there are more regions left in the current
// RS-length sampling pass.
bool
YoungList::rs_length_sampling_more() {
  return _curr != NULL;
}
262 | |
// Sample the remembered-set length of the current region, feed it to the
// incremental collection set policy if applicable, and advance the cursor.
// When the pass completes, publish the accumulated total.
void
YoungList::rs_length_sampling_next() {
  assert( _curr != NULL, "invariant" );
  size_t rs_length = _curr->rem_set()->occupied();

  _sampled_rs_lengths += rs_length;

  // The current region may not yet have been added to the
  // incremental collection set (it gets added when it is
  // retired as the current allocation region).
  if (_curr->in_collection_set()) {
    // Update the collection set policy information for this region
    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
  }

  _curr = _curr->get_next_young_region();
  if (_curr == NULL) {
    // End of the pass: make the total visible to readers of
    // _last_sampled_rs_lengths.
    _last_sampled_rs_lengths = _sampled_rs_lengths;
    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
  }
}
284 | |
// After an evacuation pause: turn the survivor chain into the new young
// list. Survivor regions are registered with the survivor rate group and
// added to the incremental collection set for the next pause; the eden
// chain head/length are then taken over from the survivor chain.
void
YoungList::reset_auxilary_lists() {
  guarantee( is_empty(), "young list should be empty" );
  assert(check_list_well_formed(), "young list should be well formed");

  // Add survivor regions to SurvRateGroup.
  _g1h->g1_policy()->note_start_adding_survivor_regions();
  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);

  for (HeapRegion* curr = _survivor_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    _g1h->g1_policy()->set_region_survivors(curr);

    // The region is a non-empty survivor so let's add it to
    // the incremental collection set for the next evacuation
    // pause.
    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
  }
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  // The survivors become the new young list.
  _head   = _survivor_head;
  _length = _survivor_length;
  if (_survivor_head != NULL) {
    assert(_survivor_tail != NULL, "cause it shouldn't be");
    assert(_survivor_length > 0, "invariant");
    // Terminate the chain at the old survivor tail.
    _survivor_tail->set_next_young_region(NULL);
  }

  // Don't clear the survivor list handles until the start of
  // the next evacuation pause - we need it in order to re-tag
  // the survivor regions from this evacuation pause as 'young'
  // at the start of the next.

  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

  assert(check_list_well_formed(), "young list should be well formed");
}
323 | |
324 void YoungList::print() { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
325 HeapRegion* lists[] = {_head, _survivor_head}; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
326 const char* names[] = {"YOUNG", "SURVIVOR"}; |
342 | 327 |
328 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
329 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
330 HeapRegion *curr = lists[list]; | |
331 if (curr == NULL) | |
332 gclog_or_tty->print_cr(" empty"); | |
333 while (curr != NULL) { | |
334 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
335 "age: %4d, y: %d, surv: %d", |
342 | 336 curr->bottom(), curr->end(), |
337 curr->top(), | |
338 curr->prev_top_at_mark_start(), | |
339 curr->next_top_at_mark_start(), | |
340 curr->top_at_conc_mark_count(), | |
341 curr->age_in_surv_rate_group_cond(), | |
342 curr->is_young(), | |
343 curr->is_survivor()); | |
344 curr = curr->get_next_young_region(); | |
345 } | |
346 } | |
347 | |
348 gclog_or_tty->print_cr(""); | |
349 } | |
350 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
// Lock-free push of a region onto the global dirty-cards region list.
// A region claims list membership by CAS-ing a self-pointer into its own
// next field; only the winning thread then CAS-es the region onto the
// global list head. The last region in the list points to itself (rather
// than NULL) so that "on the list" is distinguishable from "not on it".
void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
{
  // Claim the right to put the region on the dirty cards region list
  // by installing a self pointer.
  HeapRegion* next = hr->get_next_dirty_cards_region();
  if (next == NULL) {
    HeapRegion* res = (HeapRegion*)
      Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
                          NULL);
    if (res == NULL) {
      // We won the claim; now publish hr at the head of the global list.
      HeapRegion* head;
      do {
        // Put the region to the dirty cards region list.
        head = _dirty_cards_region_list;
        next = (HeapRegion*)
          Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
        if (next == head) {
          assert(hr->get_next_dirty_cards_region() == hr,
                 "hr->get_next_dirty_cards_region() != hr");
          if (next == NULL) {
            // The last region in the list points to itself.
            hr->set_next_dirty_cards_region(hr);
          } else {
            hr->set_next_dirty_cards_region(next);
          }
        }
      } while (next != head);   // retry if another thread moved the head
    }
  }
}
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
381 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
382 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
383 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
384 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
385 HeapRegion* hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
386 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
387 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
388 if (head == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
389 return NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
390 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
391 HeapRegion* new_head = head->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
392 if (head == new_head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
393 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
394 new_head = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
395 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
396 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
397 head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
398 } while (hr != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
399 assert(hr != NULL, "invariant"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
400 hr->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
401 return hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
402 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
403 |
// Stop the concurrent GC worker threads: the refinement threads, the
// concurrent zero-fill thread and the concurrent mark thread.
void G1CollectedHeap::stop_conc_gc_threads() {
  _cg1r->stop();
  _czft->stop();
  _cmThread->stop();
}
409 | |
410 | |
// Safepoint-only verification of the card table logging machinery: clears
// all logged cards and checks that this leaves the card table fully clean,
// then re-dirties them and checks the dirty count is restored. Guarantees
// fire if the logs and the card table disagree.
void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there's no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  // Re-dirty every logged card and check we got them all back.
  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  // Restore the normal refinement closure for subsequent processing.
  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}
455 | |
456 // Private class members. | |
457 | |
458 G1CollectedHeap* G1CollectedHeap::_g1h; | |
459 | |
460 // Private methods. | |
461 | |
462 // Finds a HeapRegion that can be used to allocate a given size of block. | |
463 | |
464 | |
465 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, | |
466 bool do_expand, | |
467 bool zero_filled) { | |
468 ConcurrentZFThread::note_region_alloc(); | |
469 HeapRegion* res = alloc_free_region_from_lists(zero_filled); | |
470 if (res == NULL && do_expand) { | |
471 expand(word_size * HeapWordSize); | |
472 res = alloc_free_region_from_lists(zero_filled); | |
473 assert(res == NULL || | |
474 (!res->isHumongous() && | |
475 (!zero_filled || | |
476 res->zero_fill_state() == HeapRegion::Allocated)), | |
477 "Alloc Regions must be zero filled (and non-H)"); | |
478 } | |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
479 if (res != NULL) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
480 if (res->is_empty()) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
481 _free_regions--; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
482 } |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
483 assert(!res->isHumongous() && |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
484 (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated), |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
485 err_msg("Non-young alloc Regions must be zero filled (and non-H):" |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
486 " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d", |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
487 res->isHumongous(), zero_filled, res->zero_fill_state())); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
488 assert(!res->is_on_unclean_list(), |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
489 "Alloc Regions must not be on the unclean list"); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
490 if (G1PrintHeapRegions) { |
342 | 491 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " |
492 "top "PTR_FORMAT, | |
493 res->hrs_index(), res->bottom(), res->end(), res->top()); | |
494 } | |
495 } | |
496 return res; | |
497 } | |
498 | |
499 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, | |
500 size_t word_size, | |
501 bool zero_filled) { | |
502 HeapRegion* alloc_region = NULL; | |
503 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
504 alloc_region = newAllocRegion_work(word_size, true, zero_filled); | |
505 if (purpose == GCAllocForSurvived && alloc_region != NULL) { | |
545 | 506 alloc_region->set_survivor(); |
342 | 507 } |
508 ++_gc_alloc_region_counts[purpose]; | |
509 } else { | |
510 g1_policy()->note_alloc_region_limit_reached(purpose); | |
511 } | |
512 return alloc_region; | |
513 } | |
514 | |
515 // If could fit into free regions w/o expansion, try. | |
516 // Otherwise, if can expand, do so. | |
517 // Otherwise, if using ex regions might help, try with ex given back. | |
518 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { | |
519 assert(regions_accounted_for(), "Region leakage!"); | |
520 | |
521 // We can't allocate H regions while cleanupComplete is running, since | |
522 // some of the regions we find to be empty might not yet be added to the | |
523 // unclean list. (If we're already at a safepoint, this call is | |
524 // unnecessary, not to mention wrong.) | |
525 if (!SafepointSynchronize::is_at_safepoint()) | |
526 wait_for_cleanup_complete(); | |
527 | |
528 size_t num_regions = | |
529 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; | |
530 | |
531 // Special case if < one region??? | |
532 | |
533 // Remember the ft size. | |
534 size_t x_size = expansion_regions(); | |
535 | |
536 HeapWord* res = NULL; | |
537 bool eliminated_allocated_from_lists = false; | |
538 | |
539 // Can the allocation potentially fit in the free regions? | |
540 if (free_regions() >= num_regions) { | |
541 res = _hrs->obj_allocate(word_size); | |
542 } | |
543 if (res == NULL) { | |
544 // Try expansion. | |
545 size_t fs = _hrs->free_suffix(); | |
546 if (fs + x_size >= num_regions) { | |
547 expand((num_regions - fs) * HeapRegion::GrainBytes); | |
548 res = _hrs->obj_allocate(word_size); | |
549 assert(res != NULL, "This should have worked."); | |
550 } else { | |
551 // Expansion won't help. Are there enough free regions if we get rid | |
552 // of reservations? | |
553 size_t avail = free_regions(); | |
554 if (avail >= num_regions) { | |
555 res = _hrs->obj_allocate(word_size); | |
556 if (res != NULL) { | |
557 remove_allocated_regions_from_lists(); | |
558 eliminated_allocated_from_lists = true; | |
559 } | |
560 } | |
561 } | |
562 } | |
563 if (res != NULL) { | |
564 // Increment by the number of regions allocated. | |
565 // FIXME: Assumes regions all of size GrainBytes. | |
566 #ifndef PRODUCT | |
567 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * | |
568 HeapRegion::GrainWords)); | |
569 #endif | |
570 if (!eliminated_allocated_from_lists) | |
571 remove_allocated_regions_from_lists(); | |
572 _summary_bytes_used += word_size * HeapWordSize; | |
573 _free_regions -= num_regions; | |
574 _num_humongous_regions += (int) num_regions; | |
575 } | |
576 assert(regions_accounted_for(), "Region Leakage"); | |
577 return res; | |
578 } | |
579 | |
580 HeapWord* | |
581 G1CollectedHeap::attempt_allocation_slow(size_t word_size, | |
582 bool permit_collection_pause) { | |
583 HeapWord* res = NULL; | |
584 HeapRegion* allocated_young_region = NULL; | |
585 | |
586 assert( SafepointSynchronize::is_at_safepoint() || | |
587 Heap_lock->owned_by_self(), "pre condition of the call" ); | |
588 | |
589 if (isHumongous(word_size)) { | |
590 // Allocation of a humongous object can, in a sense, complete a | |
591 // partial region, if the previous alloc was also humongous, and | |
592 // caused the test below to succeed. | |
593 if (permit_collection_pause) | |
594 do_collection_pause_if_appropriate(word_size); | |
595 res = humongousObjAllocate(word_size); | |
596 assert(_cur_alloc_region == NULL | |
597 || !_cur_alloc_region->isHumongous(), | |
598 "Prevent a regression of this bug."); | |
599 | |
600 } else { | |
354
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
601 // We may have concurrent cleanup working at the time. Wait for it |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
602 // to complete. In the future we would probably want to make the |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
603 // concurrent cleanup truly concurrent by decoupling it from the |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
604 // allocation. |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
605 if (!SafepointSynchronize::is_at_safepoint()) |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
606 wait_for_cleanup_complete(); |
342 | 607 // If we do a collection pause, this will be reset to a non-NULL |
608 // value. If we don't, nulling here ensures that we allocate a new | |
609 // region below. | |
610 if (_cur_alloc_region != NULL) { | |
611 // We're finished with the _cur_alloc_region. | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
612 // As we're builing (at least the young portion) of the collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
613 // set incrementally we'll add the current allocation region to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
614 // the collection set here. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
615 if (_cur_alloc_region->is_young()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
616 g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
617 } |
342 | 618 _summary_bytes_used += _cur_alloc_region->used(); |
619 _cur_alloc_region = NULL; | |
620 } | |
621 assert(_cur_alloc_region == NULL, "Invariant."); | |
622 // Completion of a heap region is perhaps a good point at which to do | |
623 // a collection pause. | |
624 if (permit_collection_pause) | |
625 do_collection_pause_if_appropriate(word_size); | |
626 // Make sure we have an allocation region available. | |
627 if (_cur_alloc_region == NULL) { | |
628 if (!SafepointSynchronize::is_at_safepoint()) | |
629 wait_for_cleanup_complete(); | |
630 bool next_is_young = should_set_young_locked(); | |
631 // If the next region is not young, make sure it's zero-filled. | |
632 _cur_alloc_region = newAllocRegion(word_size, !next_is_young); | |
633 if (_cur_alloc_region != NULL) { | |
634 _summary_bytes_used -= _cur_alloc_region->used(); | |
635 if (next_is_young) { | |
636 set_region_short_lived_locked(_cur_alloc_region); | |
637 allocated_young_region = _cur_alloc_region; | |
638 } | |
639 } | |
640 } | |
641 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), | |
642 "Prevent a regression of this bug."); | |
643 | |
644 // Now retry the allocation. | |
645 if (_cur_alloc_region != NULL) { | |
1666
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
646 if (allocated_young_region != NULL) { |
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
647 // We need to ensure that the store to top does not |
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
648 // float above the setting of the young type. |
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
649 OrderAccess::storestore(); |
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
650 } |
342 | 651 res = _cur_alloc_region->allocate(word_size); |
652 } | |
653 } | |
654 | |
655 // NOTE: fails frequently in PRT | |
656 assert(regions_accounted_for(), "Region leakage!"); | |
657 | |
658 if (res != NULL) { | |
659 if (!SafepointSynchronize::is_at_safepoint()) { | |
660 assert( permit_collection_pause, "invariant" ); | |
661 assert( Heap_lock->owned_by_self(), "invariant" ); | |
662 Heap_lock->unlock(); | |
663 } | |
664 | |
665 if (allocated_young_region != NULL) { | |
666 HeapRegion* hr = allocated_young_region; | |
667 HeapWord* bottom = hr->bottom(); | |
668 HeapWord* end = hr->end(); | |
669 MemRegion mr(bottom, end); | |
670 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); | |
671 } | |
672 } | |
673 | |
674 assert( SafepointSynchronize::is_at_safepoint() || | |
675 (res == NULL && Heap_lock->owned_by_self()) || | |
676 (res != NULL && !Heap_lock->owned_by_self()), | |
677 "post condition of the call" ); | |
678 | |
679 return res; | |
680 } | |
681 | |
682 HeapWord* | |
683 G1CollectedHeap::mem_allocate(size_t word_size, | |
684 bool is_noref, | |
685 bool is_tlab, | |
686 bool* gc_overhead_limit_was_exceeded) { | |
687 debug_only(check_for_valid_allocation_state()); | |
688 assert(no_gc_in_progress(), "Allocation during gc not allowed"); | |
689 HeapWord* result = NULL; | |
690 | |
691 // Loop until the allocation is satisified, | |
692 // or unsatisfied after GC. | |
693 for (int try_count = 1; /* return or throw */; try_count += 1) { | |
694 int gc_count_before; | |
695 { | |
696 Heap_lock->lock(); | |
697 result = attempt_allocation(word_size); | |
698 if (result != NULL) { | |
699 // attempt_allocation should have unlocked the heap lock | |
700 assert(is_in(result), "result not in heap"); | |
701 return result; | |
702 } | |
703 // Read the gc count while the heap lock is held. | |
704 gc_count_before = SharedHeap::heap()->total_collections(); | |
705 Heap_lock->unlock(); | |
706 } | |
707 | |
708 // Create the garbage collection operation... | |
709 VM_G1CollectForAllocation op(word_size, | |
710 gc_count_before); | |
711 | |
712 // ...and get the VM thread to execute it. | |
713 VMThread::execute(&op); | |
714 if (op.prologue_succeeded()) { | |
715 result = op.result(); | |
716 assert(result == NULL || is_in(result), "result not in heap"); | |
717 return result; | |
718 } | |
719 | |
720 // Give a warning if we seem to be looping forever. | |
721 if ((QueuedAllocationWarningCount > 0) && | |
722 (try_count % QueuedAllocationWarningCount == 0)) { | |
723 warning("G1CollectedHeap::mem_allocate_work retries %d times", | |
724 try_count); | |
725 } | |
726 } | |
727 } | |
728 | |
729 void G1CollectedHeap::abandon_cur_alloc_region() { | |
730 if (_cur_alloc_region != NULL) { | |
731 // We're finished with the _cur_alloc_region. | |
732 if (_cur_alloc_region->is_empty()) { | |
733 _free_regions++; | |
734 free_region(_cur_alloc_region); | |
735 } else { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
736 // As we're builing (at least the young portion) of the collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
737 // set incrementally we'll add the current allocation region to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
738 // the collection set here. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
739 if (_cur_alloc_region->is_young()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
740 g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
741 } |
342 | 742 _summary_bytes_used += _cur_alloc_region->used(); |
743 } | |
744 _cur_alloc_region = NULL; | |
745 } | |
746 } | |
747 | |
636 | 748 void G1CollectedHeap::abandon_gc_alloc_regions() { |
749 // first, make sure that the GC alloc region list is empty (it should!) | |
750 assert(_gc_alloc_region_list == NULL, "invariant"); | |
751 release_gc_alloc_regions(true /* totally */); | |
752 } | |
753 | |
342 | 754 class PostMCRemSetClearClosure: public HeapRegionClosure { |
755 ModRefBarrierSet* _mr_bs; | |
756 public: | |
757 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
758 bool doHeapRegion(HeapRegion* r) { | |
759 r->reset_gc_time_stamp(); | |
760 if (r->continuesHumongous()) | |
761 return false; | |
762 HeapRegionRemSet* hrrs = r->rem_set(); | |
763 if (hrrs != NULL) hrrs->clear(); | |
764 // You might think here that we could clear just the cards | |
765 // corresponding to the used region. But no: if we leave a dirty card | |
766 // in a region we might allocate into, then it would prevent that card | |
767 // from being enqueued, and cause it to be missed. | |
768 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
769 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
770 return false; | |
771 } | |
772 }; | |
773 | |
774 | |
775 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
776 ModRefBarrierSet* _mr_bs; | |
777 public: | |
778 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
779 bool doHeapRegion(HeapRegion* r) { | |
780 if (r->continuesHumongous()) return false; | |
781 if (r->used_region().word_size() != 0) { | |
782 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
783 } | |
784 return false; | |
785 } | |
786 }; | |
787 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
788 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
789 G1CollectedHeap* _g1h; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
790 UpdateRSOopClosure _cl; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
791 int _worker_i; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
792 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
793 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
794 _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
795 _worker_i(worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
796 _g1h(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
797 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
798 bool doHeapRegion(HeapRegion* r) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
799 if (!r->continuesHumongous()) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
800 _cl.set_from(r); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
801 r->oop_iterate(&_cl); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
802 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
803 return false; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
804 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
805 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
806 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
807 class ParRebuildRSTask: public AbstractGangTask { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
808 G1CollectedHeap* _g1; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
809 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
810 ParRebuildRSTask(G1CollectedHeap* g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
811 : AbstractGangTask("ParRebuildRSTask"), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
812 _g1(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
813 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
814 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
815 void work(int i) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
816 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
817 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
818 HeapRegion::RebuildRSClaimValue); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
819 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
820 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
821 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
822 void G1CollectedHeap::do_collection(bool explicit_gc, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
823 bool clear_all_soft_refs, |
342 | 824 size_t word_size) { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
825 if (GC_locker::check_active_before_gc()) { |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
826 return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
827 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
828 |
342 | 829 ResourceMark rm; |
830 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
831 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
832 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
833 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
834 |
342 | 835 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
836 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
837 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
838 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
839 collector_policy()->should_clear_all_soft_refs(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
840 |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
841 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
842 |
342 | 843 { |
844 IsGCActiveMark x; | |
845 | |
846 // Timing | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
847 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
848 assert(!system_gc || explicit_gc, "invariant"); |
342 | 849 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
850 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
851 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
852 PrintGC, true, gclog_or_tty); |
342 | 853 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
854 TraceMemoryManagerStats tms(true /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
855 |
342 | 856 double start = os::elapsedTime(); |
857 g1_policy()->record_full_collection_start(); | |
858 | |
859 gc_prologue(true); | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
860 increment_total_collections(true /* full gc */); |
342 | 861 |
862 size_t g1h_prev_used = used(); | |
863 assert(used() == recalculate_used(), "Should be equal"); | |
864 | |
865 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
866 HandleMark hm; // Discard invalid handles created during verification | |
867 prepare_for_verify(); | |
868 gclog_or_tty->print(" VerifyBeforeGC:"); | |
869 Universe::verify(true); | |
870 } | |
871 assert(regions_accounted_for(), "Region leakage!"); | |
872 | |
873 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
874 | |
875 // We want to discover references, but not process them yet. | |
876 // This mode is disabled in | |
877 // instanceRefKlass::process_discovered_references if the | |
878 // generation does some collection work, or | |
879 // instanceRefKlass::enqueue_discovered_references if the | |
880 // generation returns without doing any work. | |
881 ref_processor()->disable_discovery(); | |
882 ref_processor()->abandon_partial_discovery(); | |
883 ref_processor()->verify_no_references_recorded(); | |
884 | |
885 // Abandon current iterations of concurrent marking and concurrent | |
886 // refinement, if any are in progress. | |
887 concurrent_mark()->abort(); | |
888 | |
889 // Make sure we'll choose a new allocation region afterwards. | |
890 abandon_cur_alloc_region(); | |
636 | 891 abandon_gc_alloc_regions(); |
342 | 892 assert(_cur_alloc_region == NULL, "Invariant."); |
893 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); | |
894 tear_down_region_lists(); | |
895 set_used_regions_to_need_zero_fill(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
896 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
897 // We may have added regions to the current incremental collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
898 // set between the last GC or pause and now. We need to clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
899 // incremental collection set and then start rebuilding it afresh |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
900 // after this full GC. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
901 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
902 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
903 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
904 |
342 | 905 if (g1_policy()->in_young_gc_mode()) { |
906 empty_young_list(); | |
907 g1_policy()->set_full_young_gcs(true); | |
908 } | |
909 | |
910 // Temporarily make reference _discovery_ single threaded (non-MT). | |
911 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); | |
912 | |
913 // Temporarily make refs discovery atomic | |
914 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
915 | |
916 // Temporarily clear _is_alive_non_header | |
917 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
918 | |
919 ref_processor()->enable_discovery(); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
920 ref_processor()->setup_policy(do_clear_all_soft_refs); |
342 | 921 |
922 // Do collection work | |
923 { | |
924 HandleMark hm; // Discard invalid handles created during gc | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
925 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); |
342 | 926 } |
927 // Because freeing humongous regions may have added some unclean | |
928 // regions, it is necessary to tear down again before rebuilding. | |
929 tear_down_region_lists(); | |
930 rebuild_region_lists(); | |
931 | |
932 _summary_bytes_used = recalculate_used(); | |
933 | |
934 ref_processor()->enqueue_discovered_references(); | |
935 | |
936 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
937 | |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
938 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
939 |
342 | 940 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
941 HandleMark hm; // Discard invalid handles created during verification | |
942 gclog_or_tty->print(" VerifyAfterGC:"); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
943 prepare_for_verify(); |
342 | 944 Universe::verify(false); |
945 } | |
946 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
947 | |
948 reset_gc_time_stamp(); | |
949 // Since everything potentially moved, we will clear all remembered | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
950 // sets, and clear all cards. Later we will rebuild remebered |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
951 // sets. We will also reset the GC time stamps of the regions. |
342 | 952 PostMCRemSetClearClosure rs_clear(mr_bs()); |
953 heap_region_iterate(&rs_clear); | |
954 | |
955 // Resize the heap if necessary. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
956 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); |
342 | 957 |
958 if (_cg1r->use_cache()) { | |
959 _cg1r->clear_and_record_card_counts(); | |
960 _cg1r->clear_hot_cache(); | |
961 } | |
962 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
963 // Rebuild remembered sets of all regions. |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
964 if (ParallelGCThreads > 0) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
965 ParRebuildRSTask rebuild_rs_task(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
966 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
967 HeapRegion::InitialClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
968 set_par_threads(workers()->total_workers()); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
969 workers()->run_task(&rebuild_rs_task); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
970 set_par_threads(0); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
971 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
972 HeapRegion::RebuildRSClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
973 reset_heap_region_claim_values(); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
974 } else { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
975 RebuildRSOutOfRegionClosure rebuild_rs(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
976 heap_region_iterate(&rebuild_rs); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
977 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
978 |
342 | 979 if (PrintGC) { |
980 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
981 } | |
982 | |
983 if (true) { // FIXME | |
984 // Ask the permanent generation to adjust size for full collections | |
985 perm()->compute_new_size(); | |
986 } | |
987 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
988 // Start a new incremental collection set for the next pause |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
989 assert(g1_policy()->collection_set() == NULL, "must be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
990 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
991 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
992 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
993 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
994 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
995 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
996 |
342 | 997 double end = os::elapsedTime(); |
998 g1_policy()->record_full_collection_end(); | |
999 | |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1000 #ifdef TRACESPINNING |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1001 ParallelTaskTerminator::print_termination_counts(); |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1002 #endif |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1003 |
342 | 1004 gc_epilogue(true); |
1005 | |
794 | 1006 // Discard all rset updates |
1007 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1008 assert(!G1DeferredRSUpdate |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1009 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
342 | 1010 assert(regions_accounted_for(), "Region leakage!"); |
1011 } | |
1012 | |
1013 if (g1_policy()->in_young_gc_mode()) { | |
1014 _young_list->reset_sampled_info(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1015 // At this point there should be no regions in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1016 // entire heap tagged as young. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1017 assert( check_young_list_empty(true /* check_heap */), |
342 | 1018 "young list should be empty at this point"); |
1019 } | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1020 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1021 // Update the number of full collections that have been completed. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1022 increment_full_collections_completed(false /* outer */); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1023 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1024 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1025 Universe::print_heap_after_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1026 } |
342 | 1027 } |
1028 | |
1029 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1030 do_collection(true, /* explicit_gc */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1031 clear_all_soft_refs, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1032 0 /* word_size */); |
342 | 1033 } |
1034 | |
1035 // This code is mostly copied from TenuredGeneration. | |
1036 void | |
1037 G1CollectedHeap:: | |
1038 resize_if_necessary_after_full_collection(size_t word_size) { | |
1039 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1040 | |
1041 // Include the current allocation, if any, and bytes that will be | |
1042 // pre-allocated to support collections, as "used". | |
1043 const size_t used_after_gc = used(); | |
1044 const size_t capacity_after_gc = capacity(); | |
1045 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1046 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1047 // This is enforced in arguments.cpp. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1048 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1049 "otherwise the code below doesn't make sense"); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1050 |
342 | 1051 // We don't have floating point command-line arguments |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1052 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; |
342 | 1053 const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1054 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; |
342 | 1055 const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
1056 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1057 const size_t min_heap_size = collector_policy()->min_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1058 const size_t max_heap_size = collector_policy()->max_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1059 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1060 // We have to be careful here as these two calculations can overflow |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1061 // 32-bit size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1062 double used_after_gc_d = (double) used_after_gc; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1063 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1064 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1065 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1066 // Let's make sure that they are both under the max heap size, which |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1067 // by default will make them fit into a size_t. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1068 double desired_capacity_upper_bound = (double) max_heap_size; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1069 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1070 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1071 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1072 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1073 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1074 // We can now safely turn them into size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1075 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1076 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1077 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1078 // This assert only makes sense here, before we adjust them |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1079 // with respect to the min and max heap size. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1080 assert(minimum_desired_capacity <= maximum_desired_capacity, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1081 err_msg("minimum_desired_capacity = "SIZE_FORMAT", " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1082 "maximum_desired_capacity = "SIZE_FORMAT, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1083 minimum_desired_capacity, maximum_desired_capacity)); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1084 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1085 // Should not be greater than the heap max size. No need to adjust |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1086 // it with respect to the heap min size as it's a lower bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1087 // we'll try to make the capacity larger than it, not smaller). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1088 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1089 // Should not be less than the heap min size. No need to adjust it |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1090 // with respect to the heap max size as it's an upper bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1091 // we'll try to make the capacity smaller than it, not greater). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1092 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); |
342 | 1093 |
1094 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1095 const double free_percentage = |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1096 (double) free_after_gc / (double) capacity_after_gc; |
342 | 1097 gclog_or_tty->print_cr("Computing new size after full GC "); |
1098 gclog_or_tty->print_cr(" " | |
1099 " minimum_free_percentage: %6.2f", | |
1100 minimum_free_percentage); | |
1101 gclog_or_tty->print_cr(" " | |
1102 " maximum_free_percentage: %6.2f", | |
1103 maximum_free_percentage); | |
1104 gclog_or_tty->print_cr(" " | |
1105 " capacity: %6.1fK" | |
1106 " minimum_desired_capacity: %6.1fK" | |
1107 " maximum_desired_capacity: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1108 (double) capacity_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1109 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1110 (double) maximum_desired_capacity / (double) K); |
342 | 1111 gclog_or_tty->print_cr(" " |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1112 " free_after_gc: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1113 " used_after_gc: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1114 (double) free_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1115 (double) used_after_gc / (double) K); |
342 | 1116 gclog_or_tty->print_cr(" " |
1117 " free_percentage: %6.2f", | |
1118 free_percentage); | |
1119 } | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1120 if (capacity_after_gc < minimum_desired_capacity) { |
342 | 1121 // Don't expand unless it's significant |
1122 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
1123 expand(expand_bytes); | |
1124 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1125 gclog_or_tty->print_cr(" " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1126 " expanding:" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1127 " max_heap_size: %6.1fK" |
342 | 1128 " minimum_desired_capacity: %6.1fK" |
1129 " expand_bytes: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1130 (double) max_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1131 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1132 (double) expand_bytes / (double) K); |
342 | 1133 } |
1134 | |
1135 // No expansion, now see if we want to shrink | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1136 } else if (capacity_after_gc > maximum_desired_capacity) { |
342 | 1137 // Capacity too large, compute shrinking size |
1138 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1139 shrink(shrink_bytes); | |
1140 if (PrintGC && Verbose) { | |
1141 gclog_or_tty->print_cr(" " | |
1142 " shrinking:" | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1143 " min_heap_size: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1144 " maximum_desired_capacity: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1145 " shrink_bytes: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1146 (double) min_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1147 (double) maximum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1148 (double) shrink_bytes / (double) K); |
342 | 1149 } |
1150 } | |
1151 } | |
1152 | |
1153 | |
1154 HeapWord* | |
1155 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { | |
1156 HeapWord* result = NULL; | |
1157 | |
1158 // In a G1 heap, we're supposed to keep allocation from failing by | |
1159 // incremental pauses. Therefore, at least for now, we'll favor | |
1160 // expansion over collection. (This might change in the future if we can | |
1161 // do something smarter than full collection to satisfy a failed alloc.) | |
1162 | |
1163 result = expand_and_allocate(word_size); | |
1164 if (result != NULL) { | |
1165 assert(is_in(result), "result not in heap"); | |
1166 return result; | |
1167 } | |
1168 | |
1169 // OK, I guess we have to try collection. | |
1170 | |
1171 do_collection(false, false, word_size); | |
1172 | |
1173 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1174 | |
1175 if (result != NULL) { | |
1176 assert(is_in(result), "result not in heap"); | |
1177 return result; | |
1178 } | |
1179 | |
1180 // Try collecting soft references. | |
1181 do_collection(false, true, word_size); | |
1182 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1183 if (result != NULL) { | |
1184 assert(is_in(result), "result not in heap"); | |
1185 return result; | |
1186 } | |
1187 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1188 assert(!collector_policy()->should_clear_all_soft_refs(), |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1189 "Flag should have been handled and cleared prior to this point"); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1190 |
342 | 1191 // What else? We might try synchronous finalization later. If the total |
1192 // space available is large enough for the allocation, then a more | |
1193 // complete compaction phase than we've tried so far might be | |
1194 // appropriate. | |
1195 return NULL; | |
1196 } | |
1197 | |
1198 // Attempting to expand the heap sufficiently | |
1199 // to support an allocation of the given "word_size". If | |
1200 // successful, perform the allocation and return the address of the | |
1201 // allocated block, or else "NULL". | |
1202 | |
1203 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
1204 size_t expand_bytes = word_size * HeapWordSize; | |
1205 if (expand_bytes < MinHeapDeltaBytes) { | |
1206 expand_bytes = MinHeapDeltaBytes; | |
1207 } | |
1208 expand(expand_bytes); | |
1209 assert(regions_accounted_for(), "Region leakage!"); | |
1210 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); | |
1211 return result; | |
1212 } | |
1213 | |
1214 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { | |
1215 size_t pre_used = 0; | |
1216 size_t cleared_h_regions = 0; | |
1217 size_t freed_regions = 0; | |
1218 UncleanRegionList local_list; | |
1219 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, | |
1220 freed_regions, &local_list); | |
1221 | |
1222 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
1223 &local_list); | |
1224 return pre_used; | |
1225 } | |
1226 | |
1227 void | |
1228 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, | |
1229 size_t& pre_used, | |
1230 size_t& cleared_h, | |
1231 size_t& freed_regions, | |
1232 UncleanRegionList* list, | |
1233 bool par) { | |
1234 assert(!hr->continuesHumongous(), "should have filtered these out"); | |
1235 size_t res = 0; | |
677 | 1236 if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && |
1237 !hr->is_young()) { | |
1238 if (G1PolicyVerbose > 0) | |
1239 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" | |
1240 " during cleanup", hr, hr->used()); | |
1241 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); | |
342 | 1242 } |
1243 } | |
1244 | |
1245 // FIXME: both this and shrink could probably be more efficient by | |
1246 // doing one "VirtualSpace::expand_by" call rather than several. | |
1247 void G1CollectedHeap::expand(size_t expand_bytes) { | |
1248 size_t old_mem_size = _g1_storage.committed_size(); | |
1249 // We expand by a minimum of 1K. | |
1250 expand_bytes = MAX2(expand_bytes, (size_t)K); | |
1251 size_t aligned_expand_bytes = | |
1252 ReservedSpace::page_align_size_up(expand_bytes); | |
1253 aligned_expand_bytes = align_size_up(aligned_expand_bytes, | |
1254 HeapRegion::GrainBytes); | |
1255 expand_bytes = aligned_expand_bytes; | |
1256 while (expand_bytes > 0) { | |
1257 HeapWord* base = (HeapWord*)_g1_storage.high(); | |
1258 // Commit more storage. | |
1259 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); | |
1260 if (!successful) { | |
1261 expand_bytes = 0; | |
1262 } else { | |
1263 expand_bytes -= HeapRegion::GrainBytes; | |
1264 // Expand the committed region. | |
1265 HeapWord* high = (HeapWord*) _g1_storage.high(); | |
1266 _g1_committed.set_end(high); | |
1267 // Create a new HeapRegion. | |
1268 MemRegion mr(base, high); | |
1269 bool is_zeroed = !_g1_max_committed.contains(base); | |
1270 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); | |
1271 | |
1272 // Now update max_committed if necessary. | |
1273 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); | |
1274 | |
1275 // Add it to the HeapRegionSeq. | |
1276 _hrs->insert(hr); | |
1277 // Set the zero-fill state, according to whether it's already | |
1278 // zeroed. | |
1279 { | |
1280 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
1281 if (is_zeroed) { | |
1282 hr->set_zero_fill_complete(); | |
1283 put_free_region_on_list_locked(hr); | |
1284 } else { | |
1285 hr->set_zero_fill_needed(); | |
1286 put_region_on_unclean_list_locked(hr); | |
1287 } | |
1288 } | |
1289 _free_regions++; | |
1290 // And we used up an expansion region to create it. | |
1291 _expansion_regions--; | |
1292 // Tell the cardtable about it. | |
1293 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1294 // And the offset table as well. | |
1295 _bot_shared->resize(_g1_committed.word_size()); | |
1296 } | |
1297 } | |
1298 if (Verbose && PrintGC) { | |
1299 size_t new_mem_size = _g1_storage.committed_size(); | |
1300 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", | |
1301 old_mem_size/K, aligned_expand_bytes/K, | |
1302 new_mem_size/K); | |
1303 } | |
1304 } | |
1305 | |
1306 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) | |
1307 { | |
1308 size_t old_mem_size = _g1_storage.committed_size(); | |
1309 size_t aligned_shrink_bytes = | |
1310 ReservedSpace::page_align_size_down(shrink_bytes); | |
1311 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1312 HeapRegion::GrainBytes); | |
1313 size_t num_regions_deleted = 0; | |
1314 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); | |
1315 | |
1316 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1317 if (mr.byte_size() > 0) | |
1318 _g1_storage.shrink_by(mr.byte_size()); | |
1319 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1320 | |
1321 _g1_committed.set_end(mr.start()); | |
1322 _free_regions -= num_regions_deleted; | |
1323 _expansion_regions += num_regions_deleted; | |
1324 | |
1325 // Tell the cardtable about it. | |
1326 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1327 | |
1328 // And the offset table as well. | |
1329 _bot_shared->resize(_g1_committed.word_size()); | |
1330 | |
1331 HeapRegionRemSet::shrink_heap(n_regions()); | |
1332 | |
1333 if (Verbose && PrintGC) { | |
1334 size_t new_mem_size = _g1_storage.committed_size(); | |
1335 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1336 old_mem_size/K, aligned_shrink_bytes/K, | |
1337 new_mem_size/K); | |
1338 } | |
1339 } | |
1340 | |
1341 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
636 | 1342 release_gc_alloc_regions(true /* totally */); |
342 | 1343 tear_down_region_lists(); // We will rebuild them in a moment. |
1344 shrink_helper(shrink_bytes); | |
1345 rebuild_region_lists(); | |
1346 } | |
1347 | |
1348 // Public methods. | |
1349 | |
1350 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1351 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1352 #endif // _MSC_VER | |
1353 | |
1354 | |
1355 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1356 SharedHeap(policy_), | |
1357 _g1_policy(policy_), | |
1111 | 1358 _dirty_card_queue_set(false), |
1705 | 1359 _into_cset_dirty_card_queue_set(false), |
342 | 1360 _ref_processor(NULL), |
1361 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1362 _bot_shared(NULL), | |
1363 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), | |
1364 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1365 _evac_failure_scan_stack(NULL) , | |
1366 _mark_in_progress(false), | |
1367 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), | |
1368 _cur_alloc_region(NULL), | |
1369 _refine_cte_cl(NULL), | |
1370 _free_region_list(NULL), _free_region_list_size(0), | |
1371 _free_regions(0), | |
1372 _full_collection(false), | |
1373 _unclean_region_list(), | |
1374 _unclean_regions_coming(false), | |
1375 _young_list(new YoungList(this)), | |
1376 _gc_time_stamp(0), | |
526 | 1377 _surviving_young_words(NULL), |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1378 _full_collections_completed(0), |
526 | 1379 _in_cset_fast_test(NULL), |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1380 _in_cset_fast_test_base(NULL), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1381 _dirty_cards_region_list(NULL) { |
342 | 1382 _g1h = this; // To catch bugs. |
1383 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1384 vm_exit_during_initialization("Failed necessary allocation."); | |
1385 } | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1386 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1387 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1388 |
342 | 1389 int n_queues = MAX2((int)ParallelGCThreads, 1); |
1390 _task_queues = new RefToScanQueueSet(n_queues); | |
1391 | |
1392 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1393 assert(n_rem_sets > 0, "Invariant."); | |
1394 | |
1395 HeapRegionRemSetIterator** iter_arr = | |
1396 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1397 for (int i = 0; i < n_queues; i++) { | |
1398 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1399 } | |
1400 _rem_set_iterator = iter_arr; | |
1401 | |
1402 for (int i = 0; i < n_queues; i++) { | |
1403 RefToScanQueue* q = new RefToScanQueue(); | |
1404 q->initialize(); | |
1405 _task_queues->register_queue(i, q); | |
1406 } | |
1407 | |
1408 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
636 | 1409 _gc_alloc_regions[ap] = NULL; |
1410 _gc_alloc_region_counts[ap] = 0; | |
1411 _retained_gc_alloc_regions[ap] = NULL; | |
1412 // by default, we do not retain a GC alloc region for each ap; | |
1413 // we'll override this, when appropriate, below | |
1414 _retain_gc_alloc_region[ap] = false; | |
1415 } | |
1416 | |
1417 // We will try to remember the last half-full tenured region we | |
1418 // allocated to at the end of a collection so that we can re-use it | |
1419 // during the next collection. | |
1420 _retain_gc_alloc_region[GCAllocForTenured] = true; | |
1421 | |
342 | 1422 guarantee(_task_queues != NULL, "task_queues allocation failure."); |
1423 } | |
1424 | |
1425 jint G1CollectedHeap::initialize() { | |
1166 | 1426 CollectedHeap::pre_initialize(); |
342 | 1427 os::enable_vtime(); |
1428 | |
1429 // Necessary to satisfy locking discipline assertions. | |
1430 | |
1431 MutexLocker x(Heap_lock); | |
1432 | |
1433 // While there are no constraints in the GC code that HeapWordSize | |
1434 // be any particular value, there are multiple other areas in the | |
1435 // system which believe this to be true (e.g. oop->object_size in some | |
1436 // cases incorrectly returns the size in wordSize units rather than | |
1437 // HeapWordSize). | |
1438 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1439 | |
1440 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1441 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1442 | |
1443 // Ensure that the sizes are properly aligned. | |
1444 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1445 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1446 | |
1447 _cg1r = new ConcurrentG1Refine(); | |
1448 | |
1449 // Reserve the maximum. | |
1450 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1451 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1452 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1453 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1454 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1455 |
342 | 1456 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1457 HeapRegion::GrainBytes, | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1458 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1459 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1460 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1461 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1462 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1463 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1464 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1465 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1466 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1467 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1468 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1469 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1470 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1471 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1472 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1473 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1474 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1475 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1476 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1477 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1478 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1479 } |
342 | 1480 |
1481 if (!heap_rs.is_reserved()) { | |
1482 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1483 return JNI_ENOMEM; | |
1484 } | |
1485 | |
1486 // It is important to do this in a way such that concurrent readers can't | |
1487 // temporarily think somethings in the heap. (I've actually seen this | |
1488 // happen in asserts: DLD.) | |
1489 _reserved.set_word_size(0); | |
1490 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1491 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1492 | |
1493 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1494 | |
1495 _num_humongous_regions = 0; | |
1496 | |
1497 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1498 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1499 set_barrier_set(rem_set()->bs()); | |
1500 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1501 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1502 } else { | |
1503 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1504 return JNI_ENOMEM; | |
1505 } | |
1506 | |
1507 // Also create a G1 rem set. | |
1508 if (G1UseHRIntoRS) { | |
1509 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { | |
1510 _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
1511 } else { | |
1512 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); | |
1513 return JNI_ENOMEM; | |
1514 } | |
1515 } else { | |
1516 _g1_rem_set = new StupidG1RemSet(this); | |
1517 } | |
1518 | |
1519 // Carve out the G1 part of the heap. | |
1520 | |
1521 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1522 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1523 g1_rs.size()/HeapWordSize); | |
1524 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1525 | |
1526 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1527 | |
1528 _g1_storage.initialize(g1_rs, 0); | |
1529 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1530 _g1_max_committed = _g1_committed; | |
393 | 1531 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1532 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1533 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
1534 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1535 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1536 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1537 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1538 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1539 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1540 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1541 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1542 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1543 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1544 |
342 | 1545 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1546 heap_word_size(init_byte_size)); | |
1547 | |
1548 _g1h = this; | |
1549 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1550 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1551 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1552 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1553 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1554 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1555 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1556 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1557 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1558 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1559 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1560 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1561 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1562 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1563 |
342 | 1564 // Create the ConcurrentMark data structure and thread. |
1565 // (Must do this late, so that "max_regions" is defined.) | |
1566 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1567 _cmThread = _cm->cmThread(); | |
1568 | |
1569 // ...and the concurrent zero-fill thread, if necessary. | |
1570 if (G1ConcZeroFill) { | |
1571 _czft = new ConcurrentZFThread(); | |
1572 } | |
1573 | |
1574 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1575 HeapRegionRemSet::init_heap(max_regions()); | |
1576 | |
677 | 1577 // Now expand into the initial heap size. |
1578 expand(init_byte_size); | |
342 | 1579 |
1580 // Perform any initialization actions delegated to the policy. | |
1581 g1_policy()->init(); | |
1582 | |
1583 g1_policy()->note_start_of_mark_thread(); | |
1584 | |
1585 _refine_cte_cl = | |
1586 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1587 g1_rem_set(), | |
1588 concurrent_g1_refine()); | |
1589 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1590 | |
1591 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1592 SATB_Q_FL_lock, | |
1111 | 1593 G1SATBProcessCompletedThreshold, |
342 | 1594 Shared_SATB_Q_lock); |
794 | 1595 |
1596 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1597 DirtyCardQ_FL_lock, | |
1111 | 1598 concurrent_g1_refine()->yellow_zone(), |
1599 concurrent_g1_refine()->red_zone(), | |
794 | 1600 Shared_DirtyCardQ_lock); |
1601 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1602 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1603 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1604 DirtyCardQ_FL_lock, |
1111 | 1605 -1, // never trigger processing |
1606 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1607 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1608 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1609 } |
1705 | 1610 |
1611 // Initialize the card queue set used to hold cards containing | |
1612 // references into the collection set. | |
1613 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, | |
1614 DirtyCardQ_FL_lock, | |
1615 -1, // never trigger processing | |
1616 -1, // no limit on length | |
1617 Shared_DirtyCardQ_lock, | |
1618 &JavaThread::dirty_card_queue_set()); | |
1619 | |
342 | 1620 // In case we're keeping closure specialization stats, initialize those |
1621 // counts and that mechanism. | |
1622 SpecializationStats::clear(); | |
1623 | |
1624 _gc_alloc_region_list = NULL; | |
1625 | |
1626 // Do later initialization work for concurrent refinement. | |
1627 _cg1r->init(); | |
1628 | |
1629 return JNI_OK; | |
1630 } | |
1631 | |
// Create the reference processor for the G1 heap. The span covers
// the entire reserved region; discovery is multi-threaded (using
// ParallelGCThreads workers) and non-atomic.
void G1CollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  MemRegion mr = reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
                                         mr,    // span
                                         false, // Reference discovery is not atomic
                                                // (though it shouldn't matter here.)
                                         true,  // mt_discovery
                                         NULL,  // is alive closure: need to fill this in for efficiency
                                         ParallelGCThreads,
                                         ParallelRefProcEnabled,
                                         true); // Setting next fields of discovered
                                                // lists requires a barrier.
}
1646 | |
// Returns the number of bytes currently committed for the G1 part of
// the heap (not the full reserved size).
size_t G1CollectedHeap::capacity() const {
  return _g1_committed.byte_size();
}
1650 | |
// Apply 'cl' to every completed dirty-card buffer (and to the hot
// card cache) on behalf of worker 'worker_i', recording the number of
// buffers processed with the policy. Cards found to point into the
// collection set are deferred via 'into_cset_dcq'.
// NOTE(review): the 'concurrent' parameter is not used in this body —
// presumably kept for interface symmetry; confirm against callers.
void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                 DirtyCardQueue* into_cset_dcq,
                                                 bool concurrent,
                                                 int worker_i) {
  // Clean cards in the hot card cache
  concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;
  // Drain all completed buffers; each successful call consumes one.
  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->record_update_rs_processed_buffers(worker_i,
                                                  (double) n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
1668 | |
1669 | |
1670 // Computes the sum of the storage used by the various regions. | |
1671 | |
// Bytes used in the heap: the cached summary plus whatever is used in
// the current allocation region. The Heap_lock must be held on the
// caller's behalf; see used_unlocked() for the lock-free variant.
size_t G1CollectedHeap::used() const {
  assert(Heap_lock->owner() != NULL,
         "Should be owned on this thread's behalf.");
  size_t result = _summary_bytes_used;
  // Read only once in case it is set to NULL concurrently
  HeapRegion* hr = _cur_alloc_region;
  if (hr != NULL)
    result += hr->used();
  return result;
}
1682 | |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
1683 size_t G1CollectedHeap::used_unlocked() const { |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
1684 size_t result = _summary_bytes_used; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
1685 return result; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
1686 } |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
1687 |
342 | 1688 class SumUsedClosure: public HeapRegionClosure { |
1689 size_t _used; | |
1690 public: | |
1691 SumUsedClosure() : _used(0) {} | |
1692 bool doHeapRegion(HeapRegion* r) { | |
1693 if (!r->continuesHumongous()) { | |
1694 _used += r->used(); | |
1695 } | |
1696 return false; | |
1697 } | |
1698 size_t result() { return _used; } | |
1699 }; | |
1700 | |
// Recompute the used-byte total from scratch by iterating over all
// regions (as opposed to reading the cached _summary_bytes_used).
size_t G1CollectedHeap::recalculate_used() const {
  SumUsedClosure blk;
  _hrs->iterate(&blk);
  return blk.result();
}
1706 | |
1707 #ifndef PRODUCT | |
1708 class SumUsedRegionsClosure: public HeapRegionClosure { | |
1709 size_t _num; | |
1710 public: | |
677 | 1711 SumUsedRegionsClosure() : _num(0) {} |
342 | 1712 bool doHeapRegion(HeapRegion* r) { |
1713 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
1714 _num += 1; | |
1715 } | |
1716 return false; | |
1717 } | |
1718 size_t result() { return _num; } | |
1719 }; | |
1720 | |
// Recount the in-use regions by iterating over all regions.
// Debug-only: compiled only when PRODUCT is not defined (see the
// surrounding #ifndef PRODUCT guard).
size_t G1CollectedHeap::recalculate_used_regions() const {
  SumUsedRegionsClosure blk;
  _hrs->iterate(&blk);
  return blk.result();
}
1726 #endif // PRODUCT | |
1727 | |
// Lock-free estimate of the largest allocation that could currently
// succeed without expansion: a whole region's worth if any free
// region exists, otherwise the free space left in the current
// allocation region (0 if there is none).
size_t G1CollectedHeap::unsafe_max_alloc() {
  if (_free_regions > 0) return HeapRegion::GrainBytes;
  // otherwise, is there space in the current allocation region?

  // We need to store the current allocation region in a local variable
  // here. The problem is that this method doesn't take any locks and
  // there may be other threads which overwrite the current allocation
  // region field. attempt_allocation(), for example, sets it to NULL
  // and this can happen *after* the NULL check here but before the call
  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  // to be a problem in the optimized build, since the two loads of the
  // current allocation region field are optimized away.
  HeapRegion* car = _cur_alloc_region;

  // FIXME: should iterate over all regions?
  if (car == NULL) {
    return 0;
  }
  return car->free();
}
1748 | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1749 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1750 return |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1751 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1752 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1753 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1754 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
// Bump _full_collections_completed (under FullGCCount_lock) and wake
// any thread waiting in VM_G1IncCollectionPause::doit_epilogue() for
// a full collection or concurrent cycle to finish. 'outer' is true
// when called at the end of a concurrent cycle, false when called at
// the end of a (possibly nested) Full GC.
void G1CollectedHeap::increment_full_collections_completed(bool outer) {
  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);

  // We have already incremented _total_full_collections at the start
  // of the GC, so total_full_collections() represents how many full
  // collections have been started.
  unsigned int full_collections_started = total_full_collections();

  // Given that this method is called at the end of a Full GC or of a
  // concurrent cycle, and those can be nested (i.e., a Full GC can
  // interrupt a concurrent cycle), the number of full collections
  // completed should be either one (in the case where there was no
  // nesting) or two (when a Full GC interrupted a concurrent cycle)
  // behind the number of full collections started.

  // This is the case for the inner caller, i.e. a Full GC.
  assert(outer ||
         (full_collections_started == _full_collections_completed + 1) ||
         (full_collections_started == _full_collections_completed + 2),
         err_msg("for inner caller: full_collections_started = %u "
                 "is inconsistent with _full_collections_completed = %u",
                 full_collections_started, _full_collections_completed));

  // This is the case for the outer caller, i.e. the concurrent cycle.
  assert(!outer ||
         (full_collections_started == _full_collections_completed + 1),
         err_msg("for outer caller: full_collections_started = %u "
                 "is inconsistent with _full_collections_completed = %u",
                 full_collections_started, _full_collections_completed));

  _full_collections_completed += 1;

  // This notify_all() will ensure that a thread that called
  // System.gc() with (with ExplicitGCInvokesConcurrent set or not)
  // and it's waiting for a full GC to finish will be woken up. It is
  // waiting in VM_G1IncCollectionPause::doit_epilogue().
  FullGCCount_lock->notify_all();
}
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1793 |
342 | 1794 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
1795 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
1796 assert(Heap_lock->is_locked(), "Precondition#2"); | |
1797 GCCauseSetter gcs(this, cause); | |
1798 switch (cause) { | |
1799 case GCCause::_heap_inspection: | |
1800 case GCCause::_heap_dump: { | |
1801 HandleMark hm; | |
1802 do_full_collection(false); // don't clear all soft refs | |
1803 break; | |
1804 } | |
1805 default: // XXX FIX ME | |
1806 ShouldNotReachHere(); // Unexpected use of this function | |
1807 } | |
1808 } | |
1809 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1810 void G1CollectedHeap::collect(GCCause::Cause cause) { |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1811 // The caller doesn't have the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1812 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1813 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1814 unsigned int gc_count_before; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1815 unsigned int full_gc_count_before; |
342 | 1816 { |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1817 MutexLocker ml(Heap_lock); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1818 // Read the GC count while holding the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1819 gc_count_before = SharedHeap::heap()->total_collections(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1820 full_gc_count_before = SharedHeap::heap()->total_full_collections(); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1821 |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1822 // Don't want to do a GC until cleanup is completed. |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1823 wait_for_cleanup_complete(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1824 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1825 // We give up heap lock; VMThread::execute gets it back below |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1826 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1827 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1828 if (should_do_concurrent_full_gc(cause)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1829 // Schedule an initial-mark evacuation pause that will start a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1830 // concurrent cycle. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1831 VM_G1IncCollectionPause op(gc_count_before, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1832 true, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1833 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1834 cause); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1835 VMThread::execute(&op); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1836 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1837 if (cause == GCCause::_gc_locker |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1838 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1839 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1840 // Schedule a standard evacuation pause. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1841 VM_G1IncCollectionPause op(gc_count_before, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1842 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1843 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1844 cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1845 VMThread::execute(&op); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1846 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1847 // Schedule a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1848 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1849 VMThread::execute(&op); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1850 } |
342 | 1851 } |
1852 } | |
1853 | |
1854 bool G1CollectedHeap::is_in(const void* p) const { | |
1855 if (_g1_committed.contains(p)) { | |
1856 HeapRegion* hr = _hrs->addr_to_region(p); | |
1857 return hr->is_in(p); | |
1858 } else { | |
1859 return _perm_gen->as_gen()->is_in(p); | |
1860 } | |
1861 } | |
1862 | |
1863 // Iteration functions. | |
1864 | |
1865 // Iterates an OopClosure over all ref-containing fields of objects | |
1866 // within a HeapRegion. | |
1867 | |
1868 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
1869 MemRegion _mr; | |
1870 OopClosure* _cl; | |
1871 public: | |
1872 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
1873 : _mr(mr), _cl(cl) {} | |
1874 bool doHeapRegion(HeapRegion* r) { | |
1875 if (! r->continuesHumongous()) { | |
1876 r->oop_iterate(_cl); | |
1877 } | |
1878 return false; | |
1879 } | |
1880 }; | |
1881 | |
678 | 1882 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 1883 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
1884 _hrs->iterate(&blk); | |
678 | 1885 if (do_perm) { |
1886 perm_gen()->oop_iterate(cl); | |
1887 } | |
342 | 1888 } |
1889 | |
678 | 1890 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 1891 IterateOopClosureRegionClosure blk(mr, cl); |
1892 _hrs->iterate(&blk); | |
678 | 1893 if (do_perm) { |
1894 perm_gen()->oop_iterate(cl); | |
1895 } | |
342 | 1896 } |
1897 | |
1898 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
1899 | |
1900 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
1901 ObjectClosure* _cl; | |
1902 public: | |
1903 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
1904 bool doHeapRegion(HeapRegion* r) { | |
1905 if (! r->continuesHumongous()) { | |
1906 r->object_iterate(_cl); | |
1907 } | |
1908 return false; | |
1909 } | |
1910 }; | |
1911 | |
678 | 1912 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 1913 IterateObjectClosureRegionClosure blk(cl); |
1914 _hrs->iterate(&blk); | |
678 | 1915 if (do_perm) { |
1916 perm_gen()->object_iterate(cl); | |
1917 } | |
342 | 1918 } |
1919 | |
1920 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
1921 // FIXME: is this right? | |
1922 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
1923 } | |
1924 | |
1925 // Calls a SpaceClosure on a HeapRegion. | |
1926 | |
1927 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
1928 SpaceClosure* _cl; | |
1929 public: | |
1930 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
1931 bool doHeapRegion(HeapRegion* r) { | |
1932 _cl->do_space(r); | |
1933 return false; | |
1934 } | |
1935 }; | |
1936 | |
1937 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
1938 SpaceClosureRegionClosure blk(cl); | |
1939 _hrs->iterate(&blk); | |
1940 } | |
1941 | |
1942 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { | |
1943 _hrs->iterate(cl); | |
1944 } | |
1945 | |
1946 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
1947 HeapRegionClosure* cl) { | |
1948 _hrs->iterate_from(r, cl); | |
1949 } | |
1950 | |
1951 void | |
1952 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { | |
1953 _hrs->iterate_from(idx, cl); | |
1954 } | |
1955 | |
1956 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } | |
1957 | |
1958 void | |
1959 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
1960 int worker, | |
1961 jint claim_value) { | |
355 | 1962 const size_t regions = n_regions(); |
1963 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); | |
1964 // try to spread out the starting points of the workers | |
1965 const size_t start_index = regions / worker_num * (size_t) worker; | |
1966 | |
1967 // each worker will actually look at all regions | |
1968 for (size_t count = 0; count < regions; ++count) { | |
1969 const size_t index = (start_index + count) % regions; | |
1970 assert(0 <= index && index < regions, "sanity"); | |
1971 HeapRegion* r = region_at(index); | |
1972 // we'll ignore "continues humongous" regions (we'll process them | |
1973 // when we come across their corresponding "start humongous" | |
1974 // region) and regions already claimed | |
1975 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
1976 continue; | |
1977 } | |
1978 // OK, try to claim it | |
342 | 1979 if (r->claimHeapRegion(claim_value)) { |
355 | 1980 // success! |
1981 assert(!r->continuesHumongous(), "sanity"); | |
1982 if (r->startsHumongous()) { | |
1983 // If the region is "starts humongous" we'll iterate over its | |
1984 // "continues humongous" first; in fact we'll do them | |
1985 // first. The order is important. In on case, calling the | |
1986 // closure on the "starts humongous" region might de-allocate | |
1987 // and clear all its "continues humongous" regions and, as a | |
1988 // result, we might end up processing them twice. So, we'll do | |
1989 // them first (notice: most closures will ignore them anyway) and | |
1990 // then we'll do the "starts humongous" region. | |
1991 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
1992 HeapRegion* chr = region_at(ch_index); | |
1993 | |
1994 // if the region has already been claimed or it's not | |
1995 // "continues humongous" we're done | |
1996 if (chr->claim_value() == claim_value || | |
1997 !chr->continuesHumongous()) { | |
1998 break; | |
1999 } | |
2000 | |
2001 // Noone should have claimed it directly. We can given | |
2002 // that we claimed its "starts humongous" region. | |
2003 assert(chr->claim_value() != claim_value, "sanity"); | |
2004 assert(chr->humongous_start_region() == r, "sanity"); | |
2005 | |
2006 if (chr->claimHeapRegion(claim_value)) { | |
2007 // we should always be able to claim it; noone else should | |
2008 // be trying to claim this region | |
2009 | |
2010 bool res2 = cl->doHeapRegion(chr); | |
2011 assert(!res2, "Should not abort"); | |
2012 | |
2013 // Right now, this holds (i.e., no closure that actually | |
2014 // does something with "continues humongous" regions | |
2015 // clears them). We might have to weaken it in the future, | |
2016 // but let's leave these two asserts here for extra safety. | |
2017 assert(chr->continuesHumongous(), "should still be the case"); | |
2018 assert(chr->humongous_start_region() == r, "sanity"); | |
2019 } else { | |
2020 guarantee(false, "we should not reach here"); | |
2021 } | |
2022 } | |
2023 } | |
2024 | |
2025 assert(!r->continuesHumongous(), "sanity"); | |
2026 bool res = cl->doHeapRegion(r); | |
2027 assert(!res, "Should not abort"); | |
2028 } | |
2029 } | |
2030 } | |
2031 | |
390 | 2032 class ResetClaimValuesClosure: public HeapRegionClosure { |
2033 public: | |
2034 bool doHeapRegion(HeapRegion* r) { | |
2035 r->set_claim_value(HeapRegion::InitialClaimValue); | |
2036 return false; | |
2037 } | |
2038 }; | |
2039 | |
2040 void | |
2041 G1CollectedHeap::reset_heap_region_claim_values() { | |
2042 ResetClaimValuesClosure blk; | |
2043 heap_region_iterate(&blk); | |
2044 } | |
2045 | |
355 | 2046 #ifdef ASSERT |
2047 // This checks whether all regions in the heap have the correct claim | |
2048 // value. I also piggy-backed on this a check to ensure that the | |
2049 // humongous_start_region() information on "continues humongous" | |
2050 // regions is correct. | |
2051 | |
2052 class CheckClaimValuesClosure : public HeapRegionClosure { | |
2053 private: | |
2054 jint _claim_value; | |
2055 size_t _failures; | |
2056 HeapRegion* _sh_region; | |
2057 public: | |
2058 CheckClaimValuesClosure(jint claim_value) : | |
2059 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } | |
2060 bool doHeapRegion(HeapRegion* r) { | |
2061 if (r->claim_value() != _claim_value) { | |
2062 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
2063 "claim value = %d, should be %d", | |
2064 r->bottom(), r->end(), r->claim_value(), | |
2065 _claim_value); | |
2066 ++_failures; | |
2067 } | |
2068 if (!r->isHumongous()) { | |
2069 _sh_region = NULL; | |
2070 } else if (r->startsHumongous()) { | |
2071 _sh_region = r; | |
2072 } else if (r->continuesHumongous()) { | |
2073 if (r->humongous_start_region() != _sh_region) { | |
2074 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
2075 "HS = "PTR_FORMAT", should be "PTR_FORMAT, | |
2076 r->bottom(), r->end(), | |
2077 r->humongous_start_region(), | |
2078 _sh_region); | |
2079 ++_failures; | |
342 | 2080 } |
2081 } | |
355 | 2082 return false; |
2083 } | |
2084 size_t failures() { | |
2085 return _failures; | |
2086 } | |
2087 }; | |
2088 | |
2089 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
2090 CheckClaimValuesClosure cl(claim_value); | |
2091 heap_region_iterate(&cl); | |
2092 return cl.failures() == 0; | |
2093 } | |
2094 #endif // ASSERT | |
342 | 2095 |
2096 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2097 HeapRegion* r = g1_policy()->collection_set(); | |
2098 while (r != NULL) { | |
2099 HeapRegion* next = r->next_in_collection_set(); | |
2100 if (cl->doHeapRegion(r)) { | |
2101 cl->incomplete(); | |
2102 return; | |
2103 } | |
2104 r = next; | |
2105 } | |
2106 } | |
2107 | |
2108 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
2109 HeapRegionClosure *cl) { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2110 if (r == NULL) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2111 // The CSet is empty so there's nothing to do. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2112 return; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2113 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2114 |
342 | 2115 assert(r->in_collection_set(), |
2116 "Start region must be a member of the collection set."); | |
2117 HeapRegion* cur = r; | |
2118 while (cur != NULL) { | |
2119 HeapRegion* next = cur->next_in_collection_set(); | |
2120 if (cl->doHeapRegion(cur) && false) { | |
2121 cl->incomplete(); | |
2122 return; | |
2123 } | |
2124 cur = next; | |
2125 } | |
2126 cur = g1_policy()->collection_set(); | |
2127 while (cur != r) { | |
2128 HeapRegion* next = cur->next_in_collection_set(); | |
2129 if (cl->doHeapRegion(cur) && false) { | |
2130 cl->incomplete(); | |
2131 return; | |
2132 } | |
2133 cur = next; | |
2134 } | |
2135 } | |
2136 | |
2137 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
2138 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
2139 } | |
2140 | |
2141 | |
2142 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2143 Space* res = heap_region_containing(addr); | |
2144 if (res == NULL) | |
2145 res = perm_gen()->space_containing(addr); | |
2146 return res; | |
2147 } | |
2148 | |
2149 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2150 Space* sp = space_containing(addr); | |
2151 if (sp != NULL) { | |
2152 return sp->block_start(addr); | |
2153 } | |
2154 return NULL; | |
2155 } | |
2156 | |
2157 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
2158 Space* sp = space_containing(addr); | |
2159 assert(sp != NULL, "block_size of address outside of heap"); | |
2160 return sp->block_size(addr); | |
2161 } | |
2162 | |
2163 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2164 Space* sp = space_containing(addr); | |
2165 return sp->block_is_obj(addr); | |
2166 } | |
2167 | |
2168 bool G1CollectedHeap::supports_tlab_allocation() const { | |
2169 return true; | |
2170 } | |
2171 | |
2172 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
2173 return HeapRegion::GrainBytes; | |
2174 } | |
2175 | |
2176 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
2177 // Return the remaining space in the cur alloc region, but not less than | |
2178 // the min TLAB size. | |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2179 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2180 // Also, this value can be at most the humongous object threshold, |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2181 // since we can't allow tlabs to grow big enough to accomodate |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2182 // humongous objects. |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2183 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2184 // We need to store the cur alloc region locally, since it might change |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2185 // between when we test for NULL and when we use it later. |
342 | 2186 ContiguousSpace* cur_alloc_space = _cur_alloc_region; |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2187 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2188 |
342 | 2189 if (cur_alloc_space == NULL) { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2190 return max_tlab_size; |
342 | 2191 } else { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2192 return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize), |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2193 max_tlab_size); |
342 | 2194 } |
2195 } | |
2196 | |
2197 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { | |
2198 bool dummy; | |
2199 return G1CollectedHeap::mem_allocate(size, false, true, &dummy); | |
2200 } | |
2201 | |
2202 bool G1CollectedHeap::allocs_are_zero_filled() { | |
2203 return false; | |
2204 } | |
2205 | |
2206 size_t G1CollectedHeap::large_typearray_limit() { | |
2207 // FIXME | |
2208 return HeapRegion::GrainBytes/HeapWordSize; | |
2209 } | |
2210 | |
2211 size_t G1CollectedHeap::max_capacity() const { | |
1092
ed52bcc32739
6880903: G1: G1 reports incorrect Runtime.maxMemory()
tonyp
parents:
1089
diff
changeset
|
2212 return g1_reserved_obj_bytes(); |
342 | 2213 } |
2214 | |
2215 jlong G1CollectedHeap::millis_since_last_gc() { | |
2216 // assert(false, "NYI"); | |
2217 return 0; | |
2218 } | |
2219 | |
2220 | |
2221 void G1CollectedHeap::prepare_for_verify() { | |
2222 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2223 ensure_parsability(false); | |
2224 } | |
2225 g1_rem_set()->prepare_for_verify(); | |
2226 } | |
2227 | |
2228 class VerifyLivenessOopClosure: public OopClosure { | |
2229 G1CollectedHeap* g1h; | |
2230 public: | |
2231 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
2232 g1h = _g1h; | |
2233 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2234 void do_oop(narrowOop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2235 void do_oop( oop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2236 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2237 template <class T> void do_oop_work(T *p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2238 oop obj = oopDesc::load_decode_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2239 guarantee(obj == NULL || !g1h->is_obj_dead(obj), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2240 "Dead object referenced by a not dead object"); |
342 | 2241 } |
2242 }; | |
2243 | |
2244 class VerifyObjsInRegionClosure: public ObjectClosure { | |
811 | 2245 private: |
342 | 2246 G1CollectedHeap* _g1h; |
2247 size_t _live_bytes; | |
2248 HeapRegion *_hr; | |
811 | 2249 bool _use_prev_marking; |
342 | 2250 public: |
811 | 2251 // use_prev_marking == true -> use "prev" marking information, |
2252 // use_prev_marking == false -> use "next" marking information | |
2253 VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) | |
2254 : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { | |
342 | 2255 _g1h = G1CollectedHeap::heap(); |
2256 } | |
2257 void do_object(oop o) { | |
2258 VerifyLivenessOopClosure isLive(_g1h); | |
2259 assert(o != NULL, "Huh?"); | |
811 | 2260 if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { |
342 | 2261 o->oop_iterate(&isLive); |
1389
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2262 if (!_hr->obj_allocated_since_prev_marking(o)) { |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2263 size_t obj_size = o->size(); // Make sure we don't overflow |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2264 _live_bytes += (obj_size * HeapWordSize); |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2265 } |
342 | 2266 } |
2267 } | |
2268 size_t live_bytes() { return _live_bytes; } | |
2269 }; | |
2270 | |
2271 class PrintObjsInRegionClosure : public ObjectClosure { | |
2272 HeapRegion *_hr; | |
2273 G1CollectedHeap *_g1; | |
2274 public: | |
2275 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2276 _g1 = G1CollectedHeap::heap(); | |
2277 }; | |
2278 | |
2279 void do_object(oop o) { | |
2280 if (o != NULL) { | |
2281 HeapWord *start = (HeapWord *) o; | |
2282 size_t word_sz = o->size(); | |
2283 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2284 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2285 (void*) o, word_sz, | |
2286 _g1->isMarkedPrev(o), | |
2287 _g1->isMarkedNext(o), | |
2288 _hr->obj_allocated_since_prev_marking(o)); | |
2289 HeapWord *end = start + word_sz; | |
2290 HeapWord *cur; | |
2291 int *val; | |
2292 for (cur = start; cur < end; cur++) { | |
2293 val = (int *) cur; | |
2294 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2295 } | |
2296 } | |
2297 } | |
2298 }; | |
2299 | |
2300 class VerifyRegionClosure: public HeapRegionClosure { | |
811 | 2301 private: |
342 | 2302 bool _allow_dirty; |
390 | 2303 bool _par; |
811 | 2304 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2305 bool _failures; |
811 | 2306 public: |
2307 // use_prev_marking == true -> use "prev" marking information, | |
2308 // use_prev_marking == false -> use "next" marking information | |
2309 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2310 : _allow_dirty(allow_dirty), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2311 _par(par), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2312 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2313 _failures(false) {} |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2314 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2315 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2316 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2317 } |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2318 |
342 | 2319 bool doHeapRegion(HeapRegion* r) { |
390 | 2320 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2321 "Should be unclaimed at verify points."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2322 if (!r->continuesHumongous()) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2323 bool failures = false; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2324 r->verify(_allow_dirty, _use_prev_marking, &failures); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2325 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2326 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2327 } else { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2328 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2329 r->object_iterate(¬_dead_yet_cl); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2330 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2331 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2332 "max_live_bytes "SIZE_FORMAT" " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2333 "< calculated "SIZE_FORMAT, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2334 r->bottom(), r->end(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2335 r->max_live_bytes(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2336 not_dead_yet_cl.live_bytes()); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2337 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2338 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2339 } |
342 | 2340 } |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2341 return false; // stop the region iteration if we hit a failure |
342 | 2342 } |
2343 }; | |
2344 | |
2345 class VerifyRootsClosure: public OopsInGenClosure { | |
2346 private: | |
2347 G1CollectedHeap* _g1h; | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2348 bool _use_prev_marking; |
342 | 2349 bool _failures; |
2350 public: | |
811 | 2351 // use_prev_marking == true -> use "prev" marking information, |
2352 // use_prev_marking == false -> use "next" marking information | |
2353 VerifyRootsClosure(bool use_prev_marking) : | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2354 _g1h(G1CollectedHeap::heap()), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2355 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2356 _failures(false) { } |
342 | 2357 |
2358 bool failures() { return _failures; } | |
2359 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2360 template <class T> void do_oop_nv(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2361 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2362 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2363 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
811 | 2364 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { |
342 | 2365 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2366 "points to dead obj "PTR_FORMAT, p, (void*) obj); |
342 | 2367 obj->print_on(gclog_or_tty); |
2368 _failures = true; | |
2369 } | |
2370 } | |
2371 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2372 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2373 void do_oop(oop* p) { do_oop_nv(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2374 void do_oop(narrowOop* p) { do_oop_nv(p); } |
342 | 2375 }; |
2376 | |
390 | 2377 // This is the task used for parallel heap verification. |
2378 | |
2379 class G1ParVerifyTask: public AbstractGangTask { | |
2380 private: | |
2381 G1CollectedHeap* _g1h; | |
2382 bool _allow_dirty; | |
811 | 2383 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2384 bool _failures; |
390 | 2385 |
2386 public: | |
811 | 2387 // use_prev_marking == true -> use "prev" marking information, |
2388 // use_prev_marking == false -> use "next" marking information | |
2389 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, | |
2390 bool use_prev_marking) : | |
390 | 2391 AbstractGangTask("Parallel verify task"), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2392 _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2393 _allow_dirty(allow_dirty), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2394 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2395 _failures(false) { } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2396 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2397 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2398 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2399 } |
390 | 2400 |
2401 void work(int worker_i) { | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2402 HandleMark hm; |
811 | 2403 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); |
390 | 2404 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
2405 HeapRegion::ParVerifyClaimValue); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2406 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2407 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2408 } |
390 | 2409 } |
2410 }; | |
2411 | |
342 | 2412 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
811 | 2413 verify(allow_dirty, silent, /* use_prev_marking */ true); |
2414 } | |
2415 | |
2416 void G1CollectedHeap::verify(bool allow_dirty, | |
2417 bool silent, | |
2418 bool use_prev_marking) { | |
342 | 2419 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
2420 if (!silent) { gclog_or_tty->print("roots "); } | |
811 | 2421 VerifyRootsClosure rootsCl(use_prev_marking); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2422 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2423 process_strong_roots(true, // activate StrongRootsScope |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2424 false, |
342 | 2425 SharedHeap::SO_AllClasses, |
2426 &rootsCl, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2427 &blobsCl, |
342 | 2428 &rootsCl); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2429 bool failures = rootsCl.failures(); |
342 | 2430 rem_set()->invalidate(perm_gen()->used_region(), false); |
2431 if (!silent) { gclog_or_tty->print("heapRegions "); } | |
390 | 2432 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2433 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2434 "sanity check"); | |
2435 | |
811 | 2436 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
390 | 2437 int n_workers = workers()->total_workers(); |
2438 set_par_threads(n_workers); | |
2439 workers()->run_task(&task); | |
2440 set_par_threads(0); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2441 if (task.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2442 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2443 } |
390 | 2444 |
2445 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2446 "sanity check"); | |
2447 | |
2448 reset_heap_region_claim_values(); | |
2449 | |
2450 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2451 "sanity check"); | |
2452 } else { | |
811 | 2453 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
390 | 2454 _hrs->iterate(&blk); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2455 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2456 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2457 } |
390 | 2458 } |
342 | 2459 if (!silent) gclog_or_tty->print("remset "); |
2460 rem_set()->verify(); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2461 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2462 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2463 gclog_or_tty->print_cr("Heap:"); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2464 print_on(gclog_or_tty, true /* extended */); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2465 gclog_or_tty->print_cr(""); |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2466 #ifndef PRODUCT |
1044 | 2467 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
1388 | 2468 concurrent_mark()->print_reachable("at-verification-failure", |
2469 use_prev_marking, false /* all */); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2470 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2471 #endif |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2472 gclog_or_tty->flush(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2473 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2474 guarantee(!failures, "there should not have been any failures"); |
342 | 2475 } else { |
2476 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2477 } | |
2478 } | |
2479 | |
2480 class PrintRegionClosure: public HeapRegionClosure { | |
2481 outputStream* _st; | |
2482 public: | |
2483 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2484 bool doHeapRegion(HeapRegion* r) { | |
2485 r->print_on(_st); | |
2486 return false; | |
2487 } | |
2488 }; | |
2489 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2490 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 2491 |
2492 void G1CollectedHeap::print_on(outputStream* st) const { | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2493 print_on(st, PrintHeapAtGCExtended); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2494 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2495 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2496 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2497 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2498 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2499 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2500 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2501 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2502 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2503 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2504 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2505 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2506 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2507 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2508 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2509 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2510 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2511 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2512 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2513 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2514 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2515 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2516 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2517 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2518 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2519 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2520 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2521 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 2522 PrintRegionClosure blk(st); |
2523 _hrs->iterate(&blk); | |
2524 } | |
2525 | |
2526 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
2527 if (ParallelGCThreads > 0) { | |
1019 | 2528 workers()->print_worker_threads_on(st); |
2529 } | |
2530 | |
2531 _cmThread->print_on(st); | |
342 | 2532 st->cr(); |
1019 | 2533 |
2534 _cm->print_worker_threads_on(st); | |
2535 | |
2536 _cg1r->print_worker_threads_on(st); | |
2537 | |
342 | 2538 _czft->print_on(st); |
2539 st->cr(); | |
2540 } | |
2541 | |
2542 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
2543 if (ParallelGCThreads > 0) { | |
2544 workers()->threads_do(tc); | |
2545 } | |
2546 tc->do_thread(_cmThread); | |
794 | 2547 _cg1r->threads_do(tc); |
342 | 2548 tc->do_thread(_czft); |
2549 } | |
2550 | |
2551 void G1CollectedHeap::print_tracing_info() const { | |
2552 // We'll overload this to mean "trace GC pause statistics." | |
2553 if (TraceGen0Time || TraceGen1Time) { | |
2554 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2555 // to that. | |
2556 g1_policy()->print_tracing_info(); | |
2557 } | |
751 | 2558 if (G1SummarizeRSetStats) { |
342 | 2559 g1_rem_set()->print_summary_info(); |
2560 } | |
1282 | 2561 if (G1SummarizeConcMark) { |
342 | 2562 concurrent_mark()->print_summary_info(); |
2563 } | |
751 | 2564 if (G1SummarizeZFStats) { |
342 | 2565 ConcurrentZFThread::print_summary_info(); |
2566 } | |
2567 g1_policy()->print_yg_surv_rate_info(); | |
2568 | |
2569 SpecializationStats::print(); | |
2570 } | |
2571 | |
2572 | |
2573 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2574 HeapRegion* hr = heap_region_containing(addr); | |
2575 if (hr == NULL) { | |
2576 return 0; | |
2577 } else { | |
2578 return 1; | |
2579 } | |
2580 } | |
2581 | |
2582 G1CollectedHeap* G1CollectedHeap::heap() { | |
2583 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
2584 "not a garbage-first heap"); | |
2585 return _g1h; | |
2586 } | |
2587 | |
2588 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
2589 // always_do_update_barrier = false; |
342 | 2590 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
2591 // Call allocation profiler | |
2592 AllocationProfiler::iterate_since_last_gc(); | |
2593 // Fill TLAB's and such | |
2594 ensure_parsability(true); | |
2595 } | |
2596 | |
2597 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
2598 // FIXME: what is this about? | |
2599 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
2600 // is set. | |
2601 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
2602 "derived pointer present")); | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
2603 // always_do_update_barrier = true; |
342 | 2604 } |
2605 | |
2606 void G1CollectedHeap::do_collection_pause() { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2607 assert(Heap_lock->owned_by_self(), "we assume we'reholding the Heap_lock"); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2608 |
342 | 2609 // Read the GC count while holding the Heap_lock |
2610 // we need to do this _before_ wait_for_cleanup_complete(), to | |
2611 // ensure that we do not give up the heap lock and potentially | |
2612 // pick up the wrong count | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2613 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); |
342 | 2614 |
2615 // Don't want to do a GC pause while cleanup is being completed! | |
2616 wait_for_cleanup_complete(); | |
2617 | |
2618 g1_policy()->record_stop_world_start(); | |
2619 { | |
2620 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2621 VM_G1IncCollectionPause op(gc_count_before, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2622 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2623 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2624 GCCause::_g1_inc_collection_pause); |
342 | 2625 VMThread::execute(&op); |
2626 } | |
2627 } | |
2628 | |
2629 void | |
2630 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2631 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2632 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2633 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2634 CGC_lock->notify(); |
342 | 2635 } |
2636 } | |
2637 | |
2638 class VerifyMarkedObjsClosure: public ObjectClosure { | |
2639 G1CollectedHeap* _g1h; | |
2640 public: | |
2641 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
2642 void do_object(oop obj) { | |
2643 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
2644 "markandsweep mark should agree with concurrent deadness"); | |
2645 } | |
2646 }; | |
2647 | |
2648 void | |
2649 G1CollectedHeap::checkConcurrentMark() { | |
2650 VerifyMarkedObjsClosure verifycl(this); | |
2651 // MutexLockerEx x(getMarkBitMapLock(), | |
2652 // Mutex::_no_safepoint_check_flag); | |
678 | 2653 object_iterate(&verifycl, false); |
342 | 2654 } |
2655 | |
2656 void G1CollectedHeap::do_sync_mark() { | |
2657 _cm->checkpointRootsInitial(); | |
2658 _cm->markFromRoots(); | |
2659 _cm->checkpointRootsFinal(false); | |
2660 } | |
2661 | |
2662 // <NEW PREDICTION> | |
2663 | |
2664 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
2665 bool young) { | |
2666 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
2667 } | |
2668 | |
2669 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
2670 predicted_time_ms) { | |
2671 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
2672 } | |
2673 | |
2674 size_t G1CollectedHeap::pending_card_num() { | |
2675 size_t extra_cards = 0; | |
2676 JavaThread *curr = Threads::first(); | |
2677 while (curr != NULL) { | |
2678 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
2679 extra_cards += dcq.size(); | |
2680 curr = curr->next(); | |
2681 } | |
2682 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2683 size_t buffer_size = dcqs.buffer_size(); | |
2684 size_t buffer_num = dcqs.completed_buffers_num(); | |
2685 return buffer_size * buffer_num + extra_cards; | |
2686 } | |
2687 | |
2688 size_t G1CollectedHeap::max_pending_card_num() { | |
2689 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2690 size_t buffer_size = dcqs.buffer_size(); | |
2691 size_t buffer_num = dcqs.completed_buffers_num(); | |
2692 int thread_num = Threads::number_of_threads(); | |
2693 return (buffer_num + thread_num) * buffer_size; | |
2694 } | |
2695 | |
2696 size_t G1CollectedHeap::cards_scanned() { | |
2697 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); | |
2698 return g1_rset->cardsScanned(); | |
2699 } | |
2700 | |
2701 void | |
2702 G1CollectedHeap::setup_surviving_young_words() { | |
2703 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
2704 size_t array_length = g1_policy()->young_cset_length(); | |
2705 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
2706 if (_surviving_young_words == NULL) { | |
2707 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
2708 "Not enough space for young surv words summary."); | |
2709 } | |
2710 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2711 #ifdef ASSERT |
342 | 2712 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2713 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2714 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2715 #endif // !ASSERT |
342 | 2716 } |
2717 | |
2718 void | |
2719 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
2720 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2721 size_t array_length = g1_policy()->young_cset_length(); | |
2722 for (size_t i = 0; i < array_length; ++i) | |
2723 _surviving_young_words[i] += surv_young_words[i]; | |
2724 } | |
2725 | |
2726 void | |
2727 G1CollectedHeap::cleanup_surviving_young_words() { | |
2728 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
2729 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
2730 _surviving_young_words = NULL; | |
2731 } | |
2732 | |
2733 // </NEW PREDICTION> | |
2734 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2735 struct PrepareForRSScanningClosure : public HeapRegionClosure { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2736 bool doHeapRegion(HeapRegion *r) { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2737 r->rem_set()->set_iter_claimed(0); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2738 return false; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2739 } |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2740 }; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2741 |
1709 | 2742 #if TASKQUEUE_STATS |
2743 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { | |
2744 st->print_raw_cr("GC Task Stats"); | |
2745 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); | |
2746 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); | |
2747 } | |
2748 | |
2749 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { | |
2750 print_taskqueue_stats_hdr(st); | |
2751 | |
2752 TaskQueueStats totals; | |
2753 const int n = MAX2(workers()->total_workers(), 1); | |
2754 for (int i = 0; i < n; ++i) { | |
2755 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); | |
2756 totals += task_queue(i)->stats; | |
2757 } | |
2758 st->print_raw("tot "); totals.print(st); st->cr(); | |
2759 | |
2760 DEBUG_ONLY(totals.verify()); | |
2761 } | |
2762 | |
2763 void G1CollectedHeap::reset_taskqueue_stats() { | |
2764 const int n = MAX2(workers()->total_workers(), 1); | |
2765 for (int i = 0; i < n; ++i) { | |
2766 task_queue(i)->stats.reset(); | |
2767 } | |
2768 } | |
2769 #endif // TASKQUEUE_STATS | |
2770 | |
342 | 2771 void |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2772 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2773 if (GC_locker::check_active_before_gc()) { |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2774 return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2775 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2776 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2777 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2778 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2779 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2780 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2781 { |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2782 ResourceMark rm; |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2783 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2784 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2785 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2786 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2787 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2788 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2789 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2790 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2791 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2792 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2793 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2794 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2795 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2796 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2797 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2798 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2799 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2800 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2801 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2802 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2803 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2804 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2805 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2806 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2807 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2808 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2809 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2810 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2811 TraceMemoryManagerStats tms(false /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2812 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2813 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2814 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2815 guarantee(!is_gc_active(), "collection is not reentrant"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2816 assert(regions_accounted_for(), "Region leakage!"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2817 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2818 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2819 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2820 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2821 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2822 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2823 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2824 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2825 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2826 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2827 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2828 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2829 increment_total_collections(false /* full gc */); |
342 | 2830 |
2831 #if G1_REM_SET_LOGGING | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2832 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2833 print(); |
342 | 2834 #endif |
2835 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2836 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2837 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2838 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2839 gclog_or_tty->print(" VerifyBeforeGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2840 Universe::verify(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2841 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2842 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2843 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2844 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2845 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2846 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2847 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2848 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2849 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2850 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2851 // of the collection set!). |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2852 abandon_cur_alloc_region(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2853 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2854 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2855 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2856 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2857 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2858 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2859 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2860 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2861 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2862 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2863 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2864 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2865 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2866 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2867 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2868 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2869 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2870 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2871 #endif // YOUNG_LIST_VERBOSE |
342 | 2872 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2873 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2874 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2875 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2876 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2877 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2878 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2879 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2880 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2881 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2882 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2883 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2884 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2885 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2886 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2887 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2888 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2889 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2890 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2891 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2892 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2893 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2894 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2895 if (mark_in_progress()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2896 concurrent_mark()->newCSet(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2897 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2898 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2899 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2900 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2901 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2902 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2903 |
1707 | 2904 g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2905 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2906 // Nothing to do if we were unable to choose a collection set. |
342 | 2907 #if G1_REM_SET_LOGGING |
1707 | 2908 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
2909 print(); | |
342 | 2910 #endif |
1707 | 2911 PrepareForRSScanningClosure prepare_for_rs_scan; |
2912 collection_set_iterate(&prepare_for_rs_scan); | |
2913 | |
2914 setup_surviving_young_words(); | |
2915 | |
2916 // Set up the gc allocation regions. | |
2917 get_gc_alloc_regions(); | |
2918 | |
2919 // Actually do the work... | |
2920 evacuate_collection_set(); | |
2921 | |
2922 free_collection_set(g1_policy()->collection_set()); | |
2923 g1_policy()->clear_collection_set(); | |
2924 | |
2925 cleanup_surviving_young_words(); | |
2926 | |
2927 // Start a new incremental collection set for the next pause. | |
2928 g1_policy()->start_incremental_cset_building(); | |
2929 | |
2930 // Clear the _cset_fast_test bitmap in anticipation of adding | |
2931 // regions to the incremental collection set for the next | |
2932 // evacuation pause. | |
2933 clear_cset_fast_test(); | |
2934 | |
2935 if (g1_policy()->in_young_gc_mode()) { | |
2936 _young_list->reset_sampled_info(); | |
2937 | |
2938 // Don't check the whole heap at this point as the | |
2939 // GC alloc regions from this pause have been tagged | |
2940 // as survivors and moved on to the survivor list. | |
2941 // Survivor regions will fail the !is_young() check. | |
2942 assert(check_young_list_empty(false /* check_heap */), | |
2943 "young list should be empty"); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2944 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2945 #if YOUNG_LIST_VERBOSE |
1707 | 2946 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
2947 _young_list->print(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2948 #endif // YOUNG_LIST_VERBOSE |
342 | 2949 |
1707 | 2950 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2951 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2952 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2953 |
1707 | 2954 _young_list->reset_auxilary_lists(); |
342 | 2955 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2956 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2957 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2958 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2959 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2960 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2961 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2962 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2963 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2964 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2965 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2966 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2967 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2968 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2969 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2970 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2971 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2972 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2973 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2974 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2975 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2976 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2977 } |
342 | 2978 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2979 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2980 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2981 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2982 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2983 #endif // YOUNG_LIST_VERBOSE |
342 | 2984 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2985 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2986 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2987 g1_policy()->record_pause_time_ms(pause_time_ms); |
1707 | 2988 g1_policy()->record_collection_pause_end(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2989 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2990 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2991 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2992 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2993 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2994 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2995 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2996 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2997 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2998 Universe::verify(false); |
342 | 2999 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3000 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3001 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3002 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3003 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3004 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3005 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3006 size_t bytes_before = capacity(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3007 expand(expand_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3008 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3009 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3010 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3011 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3012 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3013 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3014 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3015 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3016 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3017 #endif |
342 | 3018 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3019 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3020 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3021 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3022 assert(verify_region_lists(), "Bad region lists."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3023 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3024 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3025 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3026 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3027 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3028 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3029 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3030 |
1709 | 3031 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
3032 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | |
3033 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3034 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3035 Universe::print_heap_after_gc(); |
342 | 3036 } |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3037 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3038 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3039 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3040 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3041 } |
342 | 3042 } |
3043 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3044 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3045 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3046 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3047 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3048 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3049 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3050 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3051 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3052 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3053 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3054 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3055 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3056 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3057 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3058 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3059 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3060 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3061 |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3062 |
342 | 3063 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
3064 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 3065 // make sure we don't call set_gc_alloc_region() multiple times on |
3066 // the same region | |
3067 assert(r == NULL || !r->is_gc_alloc_region(), | |
3068 "shouldn't already be a GC alloc region"); | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3069 assert(r == NULL || !r->isHumongous(), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3070 "humongous regions shouldn't be used as GC alloc regions"); |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3071 |
342 | 3072 HeapWord* original_top = NULL; |
3073 if (r != NULL) | |
3074 original_top = r->top(); | |
3075 | |
3076 // We will want to record the used space in r as being there before gc. | |
3077 // One we install it as a GC alloc region it's eligible for allocation. | |
3078 // So record it now and use it later. | |
3079 size_t r_used = 0; | |
3080 if (r != NULL) { | |
3081 r_used = r->used(); | |
3082 | |
3083 if (ParallelGCThreads > 0) { | |
3084 // need to take the lock to guard against two threads calling | |
3085 // get_gc_alloc_region concurrently (very unlikely but...) | |
3086 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3087 r->save_marks(); | |
3088 } | |
3089 } | |
3090 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
3091 _gc_alloc_regions[purpose] = r; | |
3092 if (old_alloc_region != NULL) { | |
3093 // Replace aliases too. | |
3094 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3095 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
3096 _gc_alloc_regions[ap] = r; | |
3097 } | |
3098 } | |
3099 } | |
3100 if (r != NULL) { | |
3101 push_gc_alloc_region(r); | |
3102 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
3103 // We are using a region as a GC alloc region after it has been used | |
3104 // as a mutator allocation region during the current marking cycle. | |
3105 // The mutator-allocated objects are currently implicitly marked, but | |
3106 // when we move hr->next_top_at_mark_start() forward at the the end | |
3107 // of the GC pause, they won't be. We therefore mark all objects in | |
3108 // the "gap". We do this object-by-object, since marking densely | |
3109 // does not currently work right with marking bitmap iteration. This | |
3110 // means we rely on TLAB filling at the start of pauses, and no | |
3111 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
3112 // to fix the marking bitmap iteration. | |
3113 HeapWord* curhw = r->next_top_at_mark_start(); | |
3114 HeapWord* t = original_top; | |
3115 | |
3116 while (curhw < t) { | |
3117 oop cur = (oop)curhw; | |
3118 // We'll assume parallel for generality. This is rare code. | |
3119 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
3120 curhw = curhw + cur->size(); | |
3121 } | |
3122 assert(curhw == t, "Should have parsed correctly."); | |
3123 } | |
3124 if (G1PolicyVerbose > 1) { | |
3125 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
3126 "for survivors:", r->bottom(), original_top, r->end()); | |
3127 r->print(); | |
3128 } | |
3129 g1_policy()->record_before_bytes(r_used); | |
3130 } | |
3131 } | |
3132 | |
3133 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
3134 assert(Thread::current()->is_VM_thread() || | |
3135 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); | |
3136 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), | |
3137 "Precondition."); | |
3138 hr->set_is_gc_alloc_region(true); | |
3139 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
3140 _gc_alloc_region_list = hr; | |
3141 } | |
3142 | |
3143 #ifdef G1_DEBUG | |
3144 class FindGCAllocRegion: public HeapRegionClosure { | |
3145 public: | |
3146 bool doHeapRegion(HeapRegion* r) { | |
3147 if (r->is_gc_alloc_region()) { | |
3148 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", | |
3149 r->hrs_index(), r->bottom()); | |
3150 } | |
3151 return false; | |
3152 } | |
3153 }; | |
3154 #endif // G1_DEBUG | |
3155 | |
3156 void G1CollectedHeap::forget_alloc_region_list() { | |
3157 assert(Thread::current()->is_VM_thread(), "Precondition"); | |
3158 while (_gc_alloc_region_list != NULL) { | |
3159 HeapRegion* r = _gc_alloc_region_list; | |
3160 assert(r->is_gc_alloc_region(), "Invariant."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3161 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3162 // newly allocated data in order to be able to apply deferred updates |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3163 // before the GC is done for verification purposes (i.e to allow |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3164 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3165 // collection. |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3166 r->ContiguousSpace::set_saved_mark(); |
342 | 3167 _gc_alloc_region_list = r->next_gc_alloc_region(); |
3168 r->set_next_gc_alloc_region(NULL); | |
3169 r->set_is_gc_alloc_region(false); | |
545 | 3170 if (r->is_survivor()) { |
3171 if (r->is_empty()) { | |
3172 r->set_not_young(); | |
3173 } else { | |
3174 _young_list->add_survivor_region(r); | |
3175 } | |
3176 } | |
342 | 3177 if (r->is_empty()) { |
3178 ++_free_regions; | |
3179 } | |
3180 } | |
3181 #ifdef G1_DEBUG | |
3182 FindGCAllocRegion fa; | |
3183 heap_region_iterate(&fa); | |
3184 #endif // G1_DEBUG | |
3185 } | |
3186 | |
3187 | |
3188 bool G1CollectedHeap::check_gc_alloc_regions() { | |
3189 // TODO: allocation regions check | |
3190 return true; | |
3191 } | |
3192 | |
3193 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 3194 // First, let's check that the GC alloc region list is empty (it should) |
3195 assert(_gc_alloc_region_list == NULL, "invariant"); | |
3196 | |
342 | 3197 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 3198 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3199 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
636 | 3200 |
342 | 3201 // Create new GC alloc regions. |
636 | 3202 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
3203 _retained_gc_alloc_regions[ap] = NULL; | |
3204 | |
3205 if (alloc_region != NULL) { | |
3206 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
3207 | |
3208 // let's make sure that the GC alloc region is not tagged as such | |
3209 // outside a GC operation | |
3210 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
3211 | |
3212 if (alloc_region->in_collection_set() || | |
3213 alloc_region->top() == alloc_region->end() || | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3214 alloc_region->top() == alloc_region->bottom() || |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3215 alloc_region->isHumongous()) { |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3216 // we will discard the current GC alloc region if |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3217 // * it's in the collection set (it can happen!), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3218 // * it's already full (no point in using it), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3219 // * it's empty (this means that it was emptied during |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3220 // a cleanup and it should be on the free list now), or |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3221 // * it's humongous (this means that it was emptied |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3222 // during a cleanup and was added to the free list, but |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3223 // has been subseqently used to allocate a humongous |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3224 // object that may be less than the region size). |
636 | 3225 |
3226 alloc_region = NULL; | |
3227 } | |
3228 } | |
3229 | |
3230 if (alloc_region == NULL) { | |
3231 // we will get a new GC alloc region | |
342 | 3232 alloc_region = newAllocRegionWithExpansion(ap, 0); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3233 } else { |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3234 // the region was retained from the last collection |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3235 ++_gc_alloc_region_counts[ap]; |
1388 | 3236 if (G1PrintHeapRegions) { |
3237 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
3238 "top "PTR_FORMAT, | |
3239 alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); | |
3240 } | |
342 | 3241 } |
636 | 3242 |
342 | 3243 if (alloc_region != NULL) { |
636 | 3244 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 3245 set_gc_alloc_region(ap, alloc_region); |
3246 } | |
636 | 3247 |
3248 assert(_gc_alloc_regions[ap] == NULL || | |
3249 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
3250 "the GC alloc region should be tagged as such"); | |
3251 assert(_gc_alloc_regions[ap] == NULL || | |
3252 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
3253 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 3254 } |
3255 // Set alternative regions for allocation purposes that have reached | |
636 | 3256 // their limit. |
342 | 3257 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
3258 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
3259 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
3260 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
3261 } | |
3262 } | |
3263 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3264 } | |
3265 | |
636 | 3266 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 3267 // We keep a separate list of all regions that have been alloc regions in |
636 | 3268 // the current collection pause. Forget that now. This method will |
3269 // untag the GC alloc regions and tear down the GC alloc region | |
3270 // list. It's desirable that no regions are tagged as GC alloc | |
3271 // outside GCs. | |
342 | 3272 forget_alloc_region_list(); |
3273 | |
3274 // The current alloc regions contain objs that have survived | |
3275 // collection. Make them no longer GC alloc regions. | |
3276 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3277 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 3278 _retained_gc_alloc_regions[ap] = NULL; |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3279 _gc_alloc_region_counts[ap] = 0; |
636 | 3280 |
3281 if (r != NULL) { | |
3282 // we retain nothing on _gc_alloc_regions between GCs | |
3283 set_gc_alloc_region(ap, NULL); | |
3284 | |
3285 if (r->is_empty()) { | |
3286 // we didn't actually allocate anything in it; let's just put | |
3287 // it on the free list | |
342 | 3288 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
3289 r->set_zero_fill_complete(); | |
3290 put_free_region_on_list_locked(r); | |
636 | 3291 } else if (_retain_gc_alloc_region[ap] && !totally) { |
3292 // retain it so that we can use it at the beginning of the next GC | |
3293 _retained_gc_alloc_regions[ap] = r; | |
342 | 3294 } |
3295 } | |
636 | 3296 } |
3297 } | |
3298 | |
3299 #ifndef PRODUCT | |
3300 // Useful for debugging | |
3301 | |
3302 void G1CollectedHeap::print_gc_alloc_regions() { | |
3303 gclog_or_tty->print_cr("GC alloc regions"); | |
3304 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3305 HeapRegion* r = _gc_alloc_regions[ap]; | |
3306 if (r == NULL) { | |
3307 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3308 } else { | |
3309 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3310 ap, r->bottom(), r->used()); | |
3311 } | |
3312 } | |
3313 } | |
3314 #endif // PRODUCT | |
342 | 3315 |
3316 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3317 _drain_in_progress = false; | |
3318 set_evac_failure_closure(cl); | |
3319 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3320 } | |
3321 | |
3322 void G1CollectedHeap::finalize_for_evac_failure() { | |
3323 assert(_evac_failure_scan_stack != NULL && | |
3324 _evac_failure_scan_stack->length() == 0, | |
3325 "Postcondition"); | |
3326 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3327 delete _evac_failure_scan_stack; |
342 | 3328 _evac_failure_scan_stack = NULL; |
3329 } | |
3330 | |
3331 | |
3332 | |
3333 // *** Sequential G1 Evacuation | |
3334 | |
3335 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { | |
3336 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3337 // let the caller handle alloc failure | |
3338 if (alloc_region == NULL) return NULL; | |
3339 assert(isHumongous(word_size) || !alloc_region->isHumongous(), | |
3340 "Either the object is humongous or the region isn't"); | |
3341 HeapWord* block = alloc_region->allocate(word_size); | |
3342 if (block == NULL) { | |
3343 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); | |
3344 } | |
3345 return block; | |
3346 } | |
3347 | |
3348 class G1IsAliveClosure: public BoolObjectClosure { | |
3349 G1CollectedHeap* _g1; | |
3350 public: | |
3351 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3352 void do_object(oop p) { assert(false, "Do not call."); } | |
3353 bool do_object_b(oop p) { | |
3354 // It is reachable if it is outside the collection set, or is inside | |
3355 // and forwarded. | |
3356 | |
3357 #ifdef G1_DEBUG | |
3358 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3359 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3360 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3361 #endif // G1_DEBUG | |
3362 | |
3363 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3364 } | |
3365 }; | |
3366 | |
3367 class G1KeepAliveClosure: public OopClosure { | |
3368 G1CollectedHeap* _g1; | |
3369 public: | |
3370 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3371 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3372 void do_oop( oop* p) { |
342 | 3373 oop obj = *p; |
3374 #ifdef G1_DEBUG | |
3375 if (PrintGC && Verbose) { | |
3376 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3377 p, (void*) obj, (void*) *p); | |
3378 } | |
3379 #endif // G1_DEBUG | |
3380 | |
3381 if (_g1->obj_in_cs(obj)) { | |
3382 assert( obj->is_forwarded(), "invariant" ); | |
3383 *p = obj->forwardee(); | |
3384 #ifdef G1_DEBUG | |
3385 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3386 (void*) obj, (void*) *p); | |
3387 #endif // G1_DEBUG | |
3388 } | |
3389 } | |
3390 }; | |
3391 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3392 class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3393 private: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3394 G1CollectedHeap* _g1; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3395 DirtyCardQueue *_dcq; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3396 CardTableModRefBS* _ct_bs; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3397 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3398 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3399 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3400 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3401 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3402 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3403 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3404 template <class T> void do_oop_work(T* p) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3405 assert(_from->is_in_reserved(p), "paranoia"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3406 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3407 !_from->is_survivor()) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3408 size_t card_index = _ct_bs->index_for(p); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3409 if (_ct_bs->mark_card_deferred(card_index)) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3410 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3411 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3412 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3413 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3414 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3415 |
342 | 3416 class RemoveSelfPointerClosure: public ObjectClosure { |
3417 private: | |
3418 G1CollectedHeap* _g1; | |
3419 ConcurrentMark* _cm; | |
3420 HeapRegion* _hr; | |
3421 size_t _prev_marked_bytes; | |
3422 size_t _next_marked_bytes; | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3423 OopsInHeapRegionClosure *_cl; |
342 | 3424 public: |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3425 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3426 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3427 _next_marked_bytes(0), _cl(cl) {} |
342 | 3428 |
3429 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3430 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3431 | |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3432 // The original idea here was to coalesce evacuated and dead objects. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3433 // However that caused complications with the block offset table (BOT). |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3434 // In particular if there were two TLABs, one of them partially refined. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3435 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3436 // The BOT entries of the unrefined part of TLAB_2 point to the start |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3437 // of TLAB_2. If the last object of the TLAB_1 and the first object |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3438 // of TLAB_2 are coalesced, then the cards of the unrefined part |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3439 // would point into middle of the filler object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3440 // |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3441 // The current approach is to not coalesce and leave the BOT contents intact. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3442 void do_object(oop obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3443 if (obj->is_forwarded() && obj->forwardee() == obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3444 // The object failed to move. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3445 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3446 _cm->markPrev(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3447 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3448 _prev_marked_bytes += (obj->size() * HeapWordSize); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3449 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3450 _cm->markAndGrayObjectIfNecessary(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3451 } |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3452 obj->set_mark(markOopDesc::prototype()); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3453 // While we were processing RSet buffers during the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3454 // collection, we actually didn't scan any cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3455 // collection set, since we didn't want to update remebered |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3456 // sets with entries that point into the collection set, given |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3457 // that live objects fromthe collection set are about to move |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3458 // and such entries will be stale very soon. This change also |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3459 // dealt with a reliability issue which involved scanning a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3460 // card in the collection set and coming across an array that |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3461 // was being chunked and looking malformed. The problem is |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3462 // that, if evacuation fails, we might have remembered set |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3463 // entries missing given that we skipped cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3464 // collection set. So, we'll recreate such entries now. |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3465 obj->oop_iterate(_cl); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3466 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3467 } else { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3468 // The object has been either evacuated or is dead. Fill it with a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3469 // dummy object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3470 MemRegion mr((HeapWord*)obj, obj->size()); |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3471 CollectedHeap::fill_with_object(mr); |
342 | 3472 _cm->clearRangeBothMaps(mr); |
3473 } | |
3474 } | |
3475 }; | |
3476 | |
3477 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
1705 | 3478 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3479 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3480 UpdateRSetDeferred deferred_update(_g1h, &dcq); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3481 OopsInHeapRegionClosure *cl; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3482 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3483 cl = &deferred_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3484 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3485 cl = &immediate_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3486 } |
342 | 3487 HeapRegion* cur = g1_policy()->collection_set(); |
3488 while (cur != NULL) { | |
3489 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3490 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3491 RemoveSelfPointerClosure rspc(_g1h, cl); |
342 | 3492 if (cur->evacuation_failed()) { |
3493 assert(cur->in_collection_set(), "bad CS"); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3494 cl->set_region(cur); |
342 | 3495 cur->object_iterate(&rspc); |
3496 | |
3497 // A number of manipulations to make the TAMS be the current top, | |
3498 // and the marked bytes be the ones observed in the iteration. | |
3499 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
3500 // The comments below are the postconditions achieved by the | |
3501 // calls. Note especially the last such condition, which says that | |
3502 // the count of marked bytes has been properly restored. | |
3503 cur->note_start_of_marking(false); | |
3504 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3505 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
3506 // _next_marked_bytes == prev_marked_bytes. | |
3507 cur->note_end_of_marking(); | |
3508 // _prev_top_at_mark_start == top(), | |
3509 // _prev_marked_bytes == prev_marked_bytes | |
3510 } | |
3511 // If there is no mark in progress, we modified the _next variables | |
3512 // above needlessly, but harmlessly. | |
3513 if (_g1h->mark_in_progress()) { | |
3514 cur->note_start_of_marking(false); | |
3515 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3516 // _next_marked_bytes == next_marked_bytes. | |
3517 } | |
3518 | |
3519 // Now make sure the region has the right index in the sorted array. | |
3520 g1_policy()->note_change_in_marked_bytes(cur); | |
3521 } | |
3522 cur = cur->next_in_collection_set(); | |
3523 } | |
3524 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3525 | |
3526 // Now restore saved marks, if any. | |
3527 if (_objs_with_preserved_marks != NULL) { | |
3528 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3529 assert(_objs_with_preserved_marks->length() == | |
3530 _preserved_marks_of_objs->length(), "Both or none."); | |
3531 guarantee(_objs_with_preserved_marks->length() == | |
3532 _preserved_marks_of_objs->length(), "Both or none."); | |
3533 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3534 oop obj = _objs_with_preserved_marks->at(i); | |
3535 markOop m = _preserved_marks_of_objs->at(i); | |
3536 obj->set_mark(m); | |
3537 } | |
3538 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3539 delete _objs_with_preserved_marks; | |
3540 delete _preserved_marks_of_objs; | |
3541 _objs_with_preserved_marks = NULL; | |
3542 _preserved_marks_of_objs = NULL; | |
3543 } | |
3544 } | |
3545 | |
3546 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
3547 _evac_failure_scan_stack->push(obj); | |
3548 } | |
3549 | |
3550 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3551 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3552 | |
3553 while (_evac_failure_scan_stack->length() > 0) { | |
3554 oop obj = _evac_failure_scan_stack->pop(); | |
3555 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3556 obj->oop_iterate_backwards(_evac_failure_closure); | |
3557 } | |
3558 } | |
3559 | |
3560 void G1CollectedHeap::handle_evacuation_failure(oop old) { | |
3561 markOop m = old->mark(); | |
3562 // forward to self | |
3563 assert(!old->is_forwarded(), "precondition"); | |
3564 | |
3565 old->forward_to(old); | |
3566 handle_evacuation_failure_common(old, m); | |
3567 } | |
3568 | |
3569 oop | |
3570 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
3571 oop old) { | |
3572 markOop m = old->mark(); | |
3573 oop forward_ptr = old->forward_to_atomic(old); | |
3574 if (forward_ptr == NULL) { | |
3575 // Forward-to-self succeeded. | |
3576 if (_evac_failure_closure != cl) { | |
3577 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
3578 assert(!_drain_in_progress, | |
3579 "Should only be true while someone holds the lock."); | |
3580 // Set the global evac-failure closure to the current thread's. | |
3581 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
3582 set_evac_failure_closure(cl); | |
3583 // Now do the common part. | |
3584 handle_evacuation_failure_common(old, m); | |
3585 // Reset to NULL. | |
3586 set_evac_failure_closure(NULL); | |
3587 } else { | |
3588 // The lock is already held, and this is recursive. | |
3589 assert(_drain_in_progress, "This should only be the recursive case."); | |
3590 handle_evacuation_failure_common(old, m); | |
3591 } | |
3592 return old; | |
3593 } else { | |
3594 // Someone else had a place to copy it. | |
3595 return forward_ptr; | |
3596 } | |
3597 } | |
3598 | |
3599 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
3600 set_evacuation_failed(true); | |
3601 | |
3602 preserve_mark_if_necessary(old, m); | |
3603 | |
3604 HeapRegion* r = heap_region_containing(old); | |
3605 if (!r->evacuation_failed()) { | |
3606 r->set_evacuation_failed(true); | |
1282 | 3607 if (G1PrintHeapRegions) { |
342 | 3608 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " |
3609 "["PTR_FORMAT","PTR_FORMAT")\n", | |
3610 r, r->bottom(), r->end()); | |
3611 } | |
3612 } | |
3613 | |
3614 push_on_evac_failure_scan_stack(old); | |
3615 | |
3616 if (!_drain_in_progress) { | |
3617 // prevent recursion in copy_to_survivor_space() | |
3618 _drain_in_progress = true; | |
3619 drain_evac_failure_scan_stack(); | |
3620 _drain_in_progress = false; | |
3621 } | |
3622 } | |
3623 | |
3624 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
3625 if (m != markOopDesc::prototype()) { | |
3626 if (_objs_with_preserved_marks == NULL) { | |
3627 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
3628 _objs_with_preserved_marks = | |
3629 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3630 _preserved_marks_of_objs = | |
3631 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
3632 } | |
3633 _objs_with_preserved_marks->push(obj); | |
3634 _preserved_marks_of_objs->push(m); | |
3635 } | |
3636 } | |
3637 | |
3638 // *** Parallel G1 Evacuation | |
3639 | |
3640 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
3641 size_t word_size) { | |
3642 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3643 // let the caller handle alloc failure | |
3644 if (alloc_region == NULL) return NULL; | |
3645 | |
3646 HeapWord* block = alloc_region->par_allocate(word_size); | |
3647 if (block == NULL) { | |
3648 MutexLockerEx x(par_alloc_during_gc_lock(), | |
3649 Mutex::_no_safepoint_check_flag); | |
3650 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
3651 } | |
3652 return block; | |
3653 } | |
3654 | |
545 | 3655 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
3656 bool par) { | |
3657 // Another thread might have obtained alloc_region for the given | |
3658 // purpose, and might be attempting to allocate in it, and might | |
3659 // succeed. Therefore, we can't do the "finalization" stuff on the | |
3660 // region below until we're sure the last allocation has happened. | |
3661 // We ensure this by allocating the remaining space with a garbage | |
3662 // object. | |
3663 if (par) par_allocate_remaining_space(alloc_region); | |
3664 // Now we can do the post-GC stuff on the region. | |
3665 alloc_region->note_end_of_copying(); | |
3666 g1_policy()->record_after_bytes(alloc_region->used()); | |
3667 } | |
3668 | |
342 | 3669 HeapWord* |
3670 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
3671 HeapRegion* alloc_region, | |
3672 bool par, | |
3673 size_t word_size) { | |
3674 HeapWord* block = NULL; | |
3675 // In the parallel case, a previous thread to obtain the lock may have | |
3676 // already assigned a new gc_alloc_region. | |
3677 if (alloc_region != _gc_alloc_regions[purpose]) { | |
3678 assert(par, "But should only happen in parallel case."); | |
3679 alloc_region = _gc_alloc_regions[purpose]; | |
3680 if (alloc_region == NULL) return NULL; | |
3681 block = alloc_region->par_allocate(word_size); | |
3682 if (block != NULL) return block; | |
3683 // Otherwise, continue; this new region is empty, too. | |
3684 } | |
3685 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 3686 retire_alloc_region(alloc_region, par); |
342 | 3687 |
3688 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
3689 // Cannot allocate more regions for the given purpose. | |
3690 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
3691 // Is there an alternative? | |
3692 if (purpose != alt_purpose) { | |
3693 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
3694 // Has not the alternative region been aliased? | |
545 | 3695 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 3696 // Try to allocate in the alternative region. |
3697 if (par) { | |
3698 block = alt_region->par_allocate(word_size); | |
3699 } else { | |
3700 block = alt_region->allocate(word_size); | |
3701 } | |
3702 // Make an alias. | |
3703 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 3704 if (block != NULL) { |
3705 return block; | |
3706 } | |
3707 retire_alloc_region(alt_region, par); | |
342 | 3708 } |
3709 // Both the allocation region and the alternative one are full | |
3710 // and aliased, replace them with a new allocation region. | |
3711 purpose = alt_purpose; | |
3712 } else { | |
3713 set_gc_alloc_region(purpose, NULL); | |
3714 return NULL; | |
3715 } | |
3716 } | |
3717 | |
3718 // Now allocate a new region for allocation. | |
3719 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); | |
3720 | |
3721 // let the caller handle alloc failure | |
3722 if (alloc_region != NULL) { | |
3723 | |
3724 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3725 assert(alloc_region->saved_mark_at_top(), | |
3726 "Mark should have been saved already."); | |
3727 // We used to assert that the region was zero-filled here, but no | |
3728 // longer. | |
3729 | |
3730 // This must be done last: once it's installed, other regions may | |
3731 // allocate in it (without holding the lock.) | |
3732 set_gc_alloc_region(purpose, alloc_region); | |
3733 | |
3734 if (par) { | |
3735 block = alloc_region->par_allocate(word_size); | |
3736 } else { | |
3737 block = alloc_region->allocate(word_size); | |
3738 } | |
3739 // Caller handles alloc failure. | |
3740 } else { | |
3741 // This sets other apis using the same old alloc region to NULL, also. | |
3742 set_gc_alloc_region(purpose, NULL); | |
3743 } | |
3744 return block; // May be NULL. | |
3745 } | |
3746 | |
3747 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
3748 HeapWord* block = NULL; | |
3749 size_t free_words; | |
3750 do { | |
3751 free_words = r->free()/HeapWordSize; | |
3752 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
3753 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 3754 // Otherwise, try to claim it. |
3755 block = r->par_allocate(free_words); | |
3756 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3757 fill_with_object(block, free_words); |
342 | 3758 } |
3759 | |
3760 #ifndef PRODUCT | |
3761 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
3762 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
3763 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
3764 return true; | |
3765 } | |
3766 #endif // PRODUCT | |
3767 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3768 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3769 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3770 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3771 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3772 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3773 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3774 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3775 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3776 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3777 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3778 _age_table(false), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3779 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3780 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3781 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3782 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3783 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3784 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3785 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3786 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3787 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3788 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3789 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3790 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3791 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3792 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3793 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3794 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3795 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3796 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3797 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3798 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3799 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3800 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3801 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3802 } |
342 | 3803 |
1709 | 3804 void |
3805 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) | |
3806 { | |
3807 st->print_raw_cr("GC Termination Stats"); | |
3808 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" | |
3809 " ------waste (KiB)------"); | |
3810 st->print_raw_cr("thr ms ms % ms % attempts" | |
3811 " total alloc undo"); | |
3812 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" | |
3813 " ------- ------- -------"); | |
3814 } | |
3815 | |
3816 void | |
3817 G1ParScanThreadState::print_termination_stats(int i, | |
3818 outputStream* const st) const | |
3819 { | |
3820 const double elapsed_ms = elapsed_time() * 1000.0; | |
3821 const double s_roots_ms = strong_roots_time() * 1000.0; | |
3822 const double term_ms = term_time() * 1000.0; | |
3823 st->print_cr("%3d %9.2f %9.2f %6.2f " | |
3824 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " | |
3825 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), | |
3826 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, | |
3827 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), | |
3828 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, | |
3829 alloc_buffer_waste() * HeapWordSize / K, | |
3830 undo_waste() * HeapWordSize / K); | |
3831 } | |
3832 | |
342 | 3833 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
3834 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
3835 _par_scan_state(par_scan_state) { } | |
3836 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3837 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 3838 // This is called _after_ do_oop_work has been called, hence after |
3839 // the object has been relocated to its new location and *p points | |
3840 // to its new location. | |
3841 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3842 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3843 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3844 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3845 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), |
342 | 3846 "shouldn't still be in the CSet if evacuation didn't fail."); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3847 HeapWord* addr = (HeapWord*)obj; |
342 | 3848 if (_g1->is_in_g1_reserved(addr)) |
3849 _cm->grayRoot(oop(addr)); | |
3850 } | |
3851 } | |
3852 | |
3853 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
3854 size_t word_sz = old->size(); | |
3855 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
3856 // +1 to make the -1 indexes valid... | |
3857 int young_index = from_region->young_index_in_cset()+1; | |
3858 assert( (from_region->is_young() && young_index > 0) || | |
3859 (!from_region->is_young() && young_index == 0), "invariant" ); | |
3860 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
3861 markOop m = old->mark(); | |
545 | 3862 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
3863 : m->age(); | |
3864 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 3865 word_sz); |
3866 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
3867 oop obj = oop(obj_ptr); | |
3868 | |
3869 if (obj_ptr == NULL) { | |
3870 // This will either forward-to-self, or detect that someone else has | |
3871 // installed a forwarding pointer. | |
3872 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
3873 return _g1->handle_evacuation_failure_par(cl, old); | |
3874 } | |
3875 | |
526 | 3876 // We're going to allocate linearly, so might as well prefetch ahead. |
3877 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
3878 | |
342 | 3879 oop forward_ptr = old->forward_to_atomic(obj); |
3880 if (forward_ptr == NULL) { | |
3881 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 3882 if (g1p->track_object_age(alloc_purpose)) { |
3883 // We could simply do obj->incr_age(). However, this causes a | |
3884 // performance issue. obj->incr_age() will first check whether | |
3885 // the object has a displaced mark by checking its mark word; | |
3886 // getting the mark word from the new location of the object | |
3887 // stalls. So, given that we already have the mark word and we | |
3888 // are about to install it anyway, it's better to increase the | |
3889 // age on the mark word, when the object does not have a | |
3890 // displaced mark word. We're not expecting many objects to have | |
3891 // a displaced marked word, so that case is not optimized | |
3892 // further (it could be...) and we simply call obj->incr_age(). | |
3893 | |
3894 if (m->has_displaced_mark_helper()) { | |
3895 // in this case, we have to install the mark word first, | |
3896 // otherwise obj looks to be forwarded (the old mark word, | |
3897 // which contains the forward pointer, was copied) | |
3898 obj->set_mark(m); | |
3899 obj->incr_age(); | |
3900 } else { | |
3901 m = m->incr_age(); | |
545 | 3902 obj->set_mark(m); |
526 | 3903 } |
545 | 3904 _par_scan_state->age_table()->add(obj, word_sz); |
3905 } else { | |
3906 obj->set_mark(m); | |
526 | 3907 } |
3908 | |
342 | 3909 // preserve "next" mark bit |
3910 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
3911 if (!use_local_bitmaps || | |
3912 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
3913 // if we couldn't mark it on the local bitmap (this happens when | |
3914 // the object was not allocated in the GCLab), we have to bite | |
3915 // the bullet and do the standard parallel mark | |
3916 _cm->markAndGrayObjectIfNecessary(obj); | |
3917 } | |
3918 #if 1 | |
3919 if (_g1->isMarkedNext(old)) { | |
3920 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
3921 } | |
3922 #endif | |
3923 } | |
3924 | |
3925 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
3926 surv_young_words[young_index] += word_sz; | |
3927 | |
3928 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
3929 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3930 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3931 _par_scan_state->push_on_queue(old_p); |
342 | 3932 } else { |
526 | 3933 // No point in using the slower heap_region_containing() method, |
3934 // given that we know obj is in the heap. | |
3935 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 3936 obj->oop_iterate_backwards(_scanner); |
3937 } | |
3938 } else { | |
3939 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
3940 obj = forward_ptr; | |
3941 } | |
3942 return obj; | |
3943 } | |
3944 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3945 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3946 template <class T> |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3947 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3948 ::do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3949 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 3950 assert(barrier != G1BarrierRS || obj != NULL, |
3951 "Precondition: G1BarrierRS implies obj is nonNull"); | |
3952 | |
526 | 3953 // here the null check is implicit in the cset_fast_test() test |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3954 if (_g1->in_cset_fast_test(obj)) { |
342 | 3955 #if G1_REM_SET_LOGGING |
526 | 3956 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
3957 "into CS.", p, (void*) obj); | |
342 | 3958 #endif |
526 | 3959 if (obj->is_forwarded()) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3960 oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
526 | 3961 } else { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3962 oop copy_oop = copy_to_survivor_space(obj); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3963 oopDesc::encode_store_heap_oop(p, copy_oop); |
342 | 3964 } |
526 | 3965 // When scanning the RS, we only care about objs in CS. |
3966 if (barrier == G1BarrierRS) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3967 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 3968 } |
526 | 3969 } |
3970 | |
3971 if (barrier == G1BarrierEvac && obj != NULL) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3972 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 3973 } |
3974 | |
3975 if (do_gen_barrier && obj != NULL) { | |
3976 par_do_barrier(p); | |
3977 } | |
3978 } | |
3979 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3980 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3981 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3982 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3983 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
526 | 3984 assert(has_partial_array_mask(p), "invariant"); |
3985 oop old = clear_partial_array_mask(p); | |
342 | 3986 assert(old->is_objArray(), "must be obj array"); |
3987 assert(old->is_forwarded(), "must be forwarded"); | |
3988 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
3989 | |
3990 objArrayOop obj = objArrayOop(old->forwardee()); | |
3991 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
3992 // Process ParGCArrayScanChunk elements now | |
3993 // and push the remainder back onto queue | |
3994 int start = arrayOop(old)->length(); | |
3995 int end = obj->length(); | |
3996 int remainder = end - start; | |
3997 assert(start <= end, "just checking"); | |
3998 if (remainder > 2 * ParGCArrayScanChunk) { | |
3999 // Test above combines last partial chunk with a full chunk | |
4000 end = start + ParGCArrayScanChunk; | |
4001 arrayOop(old)->set_length(end); | |
4002 // Push remainder. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4003 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4004 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4005 _par_scan_state->push_on_queue(old_p); |
342 | 4006 } else { |
4007 // Restore length so that the heap remains parsable in | |
4008 // case of evacuation failure. | |
4009 arrayOop(old)->set_length(end); | |
4010 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4011 _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
342 | 4012 // process our set of indices (include header in first chunk) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4013 obj->oop_iterate_range(&_scanner, start, end); |
342 | 4014 } |
4015 | |
4016 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
4017 protected: | |
4018 G1CollectedHeap* _g1h; | |
4019 G1ParScanThreadState* _par_scan_state; | |
4020 RefToScanQueueSet* _queues; | |
4021 ParallelTaskTerminator* _terminator; | |
4022 | |
4023 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
4024 RefToScanQueueSet* queues() { return _queues; } | |
4025 ParallelTaskTerminator* terminator() { return _terminator; } | |
4026 | |
4027 public: | |
4028 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
4029 G1ParScanThreadState* par_scan_state, | |
4030 RefToScanQueueSet* queues, | |
4031 ParallelTaskTerminator* terminator) | |
4032 : _g1h(g1h), _par_scan_state(par_scan_state), | |
4033 _queues(queues), _terminator(terminator) {} | |
4034 | |
4035 void do_void() { | |
4036 G1ParScanThreadState* pss = par_scan_state(); | |
4037 while (true) { | |
4038 pss->trim_queue(); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4039 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4040 StarTask stolen_task; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4041 if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
526 | 4042 // slightly paranoid tests; I'm trying to catch potential |
4043 // problems before we go into push_on_queue to know where the | |
4044 // problem is coming from | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4045 assert((oop*)stolen_task != NULL, "Error"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4046 if (stolen_task.is_narrow()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4047 assert(UseCompressedOops, "Error"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4048 narrowOop* p = (narrowOop*) stolen_task; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4049 assert(has_partial_array_mask(p) || |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4050 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4051 pss->push_on_queue(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4052 } else { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4053 oop* p = (oop*) stolen_task; |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4054 assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4055 pss->push_on_queue(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4056 } |
342 | 4057 continue; |
4058 } | |
4059 pss->start_term_time(); | |
4060 if (terminator()->offer_termination()) break; | |
4061 pss->end_term_time(); | |
4062 } | |
4063 pss->end_term_time(); | |
4064 pss->retire_alloc_buffers(); | |
4065 } | |
4066 }; | |
4067 | |
4068 class G1ParTask : public AbstractGangTask { | |
4069 protected: | |
4070 G1CollectedHeap* _g1h; | |
4071 RefToScanQueueSet *_queues; | |
4072 ParallelTaskTerminator _terminator; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4073 int _n_workers; |
342 | 4074 |
4075 Mutex _stats_lock; | |
4076 Mutex* stats_lock() { return &_stats_lock; } | |
4077 | |
4078 size_t getNCards() { | |
4079 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
4080 / G1BlockOffsetSharedArray::N_bytes; | |
4081 } | |
4082 | |
4083 public: | |
4084 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
4085 : AbstractGangTask("G1 collection"), | |
4086 _g1h(g1h), | |
4087 _queues(task_queues), | |
4088 _terminator(workers, _queues), | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4089 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4090 _n_workers(workers) |
342 | 4091 {} |
4092 | |
4093 RefToScanQueueSet* queues() { return _queues; } | |
4094 | |
4095 RefToScanQueue *work_queue(int i) { | |
4096 return queues()->queue(i); | |
4097 } | |
4098 | |
4099 void work(int i) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4100 if (i >= _n_workers) return; // no work needed this round |
1611 | 4101 |
4102 double start_time_ms = os::elapsedTime() * 1000.0; | |
4103 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); | |
4104 | |
342 | 4105 ResourceMark rm; |
4106 HandleMark hm; | |
4107 | |
526 | 4108 G1ParScanThreadState pss(_g1h, i); |
4109 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
4110 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
4111 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 4112 |
4113 pss.set_evac_closure(&scan_evac_cl); | |
4114 pss.set_evac_failure_closure(&evac_failure_cl); | |
4115 pss.set_partial_scan_closure(&partial_scan_cl); | |
4116 | |
4117 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
4118 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
4119 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4120 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4121 |
342 | 4122 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
4123 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
4124 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
4125 | |
4126 OopsInHeapRegionClosure *scan_root_cl; | |
4127 OopsInHeapRegionClosure *scan_perm_cl; | |
4128 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
4129 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
342 | 4130 scan_root_cl = &scan_mark_root_cl; |
4131 scan_perm_cl = &scan_mark_perm_cl; | |
4132 } else { | |
4133 scan_root_cl = &only_scan_root_cl; | |
4134 scan_perm_cl = &only_scan_perm_cl; | |
4135 } | |
4136 | |
4137 pss.start_strong_roots(); | |
4138 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4139 SharedHeap::SO_AllClasses, | |
4140 scan_root_cl, | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4141 &push_heap_rs_cl, |
342 | 4142 scan_perm_cl, |
4143 i); | |
4144 pss.end_strong_roots(); | |
4145 { | |
4146 double start = os::elapsedTime(); | |
4147 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4148 evac.do_void(); | |
4149 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4150 double term_ms = pss.term_time()*1000.0; | |
4151 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
1611 | 4152 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); |
342 | 4153 } |
1282 | 4154 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
342 | 4155 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
4156 | |
4157 // Clean up any par-expanded rem sets. | |
4158 HeapRegionRemSet::par_cleanup(); | |
4159 | |
4160 if (ParallelGCVerbose) { | |
1709 | 4161 MutexLocker x(stats_lock()); |
4162 pss.print_termination_stats(i); | |
342 | 4163 } |
4164 | |
4165 assert(pss.refs_to_scan() == 0, "Task queue should be empty"); | |
4166 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); | |
1611 | 4167 double end_time_ms = os::elapsedTime() * 1000.0; |
4168 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); | |
342 | 4169 } |
4170 }; | |
4171 | |
4172 // *** Common G1 Evacuation Stuff | |
4173 | |
4174 void | |
4175 G1CollectedHeap:: | |
4176 g1_process_strong_roots(bool collecting_perm_gen, | |
4177 SharedHeap::ScanningOption so, | |
4178 OopClosure* scan_non_heap_roots, | |
4179 OopsInHeapRegionClosure* scan_rs, | |
4180 OopsInGenClosure* scan_perm, | |
4181 int worker_i) { | |
4182 // First scan the strong roots, including the perm gen. | |
4183 double ext_roots_start = os::elapsedTime(); | |
4184 double closure_app_time_sec = 0.0; | |
4185 | |
4186 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4187 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4188 buf_scan_perm.set_generation(perm_gen()); | |
4189 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4190 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4191 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4192 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4193 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4194 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4195 collecting_perm_gen, so, |
342 | 4196 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4197 &eager_scan_code_roots, |
342 | 4198 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4199 |
342 | 4200 // Finish up any enqueued closure apps. |
4201 buf_scan_non_heap_roots.done(); | |
4202 buf_scan_perm.done(); | |
4203 double ext_roots_end = os::elapsedTime(); | |
4204 g1_policy()->reset_obj_copy_time(worker_i); | |
4205 double obj_copy_time_sec = | |
4206 buf_scan_non_heap_roots.closure_app_seconds() + | |
4207 buf_scan_perm.closure_app_seconds(); | |
4208 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4209 double ext_root_time_ms = | |
4210 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4211 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4212 | |
4213 // Scan strong roots in mark stack. | |
4214 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4215 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4216 } | |
4217 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4218 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4219 | |
4220 // XXX What should this be doing in the parallel case? | |
4221 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4222 // Now scan the complement of the collection set. | |
4223 if (scan_rs != NULL) { | |
4224 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4225 } | |
4226 // Finish with the ref_processor roots. | |
4227 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
4228 ref_processor()->oops_do(scan_non_heap_roots); | |
4229 } | |
4230 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4231 _process_strong_tasks->all_tasks_completed(); | |
4232 } | |
4233 | |
4234 void | |
4235 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4236 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4237 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4238 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4239 } |
4240 | |
4241 | |
4242 class SaveMarksClosure: public HeapRegionClosure { | |
4243 public: | |
4244 bool doHeapRegion(HeapRegion* r) { | |
4245 r->save_marks(); | |
4246 return false; | |
4247 } | |
4248 }; | |
4249 | |
4250 void G1CollectedHeap::save_marks() { | |
4251 if (ParallelGCThreads == 0) { | |
4252 SaveMarksClosure sm; | |
4253 heap_region_iterate(&sm); | |
4254 } | |
4255 // We do this even in the parallel case | |
4256 perm_gen()->save_marks(); | |
4257 } | |
4258 | |
4259 void G1CollectedHeap::evacuate_collection_set() { | |
4260 set_evacuation_failed(false); | |
4261 | |
4262 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4263 concurrent_g1_refine()->set_use_cache(false); | |
889 | 4264 concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
4265 | |
342 | 4266 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
4267 set_par_threads(n_workers); | |
4268 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4269 | |
4270 init_for_evac_failure(NULL); | |
4271 | |
4272 rem_set()->prepare_for_younger_refs_iterate(true); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4273 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4274 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
342 | 4275 double start_par = os::elapsedTime(); |
4276 if (ParallelGCThreads > 0) { | |
4277 // The individual threads will set their evac-failure closures. | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4278 StrongRootsScope srs(this); |
1709 | 4279 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); |
342 | 4280 workers()->run_task(&g1_par_task); |
4281 } else { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4282 StrongRootsScope srs(this); |
342 | 4283 g1_par_task.work(0); |
4284 } | |
4285 | |
4286 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4287 g1_policy()->record_par_time(par_time); | |
4288 set_par_threads(0); | |
4289 // Is this the right thing to do here? We don't save marks | |
4290 // on individual heap regions when we allocate from | |
4291 // them in parallel, so this seems like the correct place for this. | |
545 | 4292 retire_all_alloc_regions(); |
342 | 4293 { |
4294 G1IsAliveClosure is_alive(this); | |
4295 G1KeepAliveClosure keep_alive(this); | |
4296 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4297 } | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4298 release_gc_alloc_regions(false /* totally */); |
342 | 4299 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4300 |
889 | 4301 concurrent_g1_refine()->clear_hot_cache(); |
342 | 4302 concurrent_g1_refine()->set_use_cache(true); |
4303 | |
4304 finalize_for_evac_failure(); | |
4305 | |
4306 // Must do this before removing self-forwarding pointers, which clears | |
4307 // the per-region evac-failure flags. | |
4308 concurrent_mark()->complete_marking_in_collection_set(); | |
4309 | |
4310 if (evacuation_failed()) { | |
4311 remove_self_forwarding_pointers(); | |
4312 if (PrintGCDetails) { | |
4313 gclog_or_tty->print(" (evacuation failed)"); | |
4314 } else if (PrintGC) { | |
4315 gclog_or_tty->print("--"); | |
4316 } | |
4317 } | |
4318 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4319 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4320 RedirtyLoggedCardTableEntryFastClosure redirty; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4321 dirty_card_queue_set().set_closure(&redirty); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4322 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
1111 | 4323 |
4324 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); | |
4325 dcq.merge_bufferlists(&dirty_card_queue_set()); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4326 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4327 } |
342 | 4328 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
4329 } | |
4330 | |
4331 void G1CollectedHeap::free_region(HeapRegion* hr) { | |
4332 size_t pre_used = 0; | |
4333 size_t cleared_h_regions = 0; | |
4334 size_t freed_regions = 0; | |
4335 UncleanRegionList local_list; | |
4336 | |
4337 HeapWord* start = hr->bottom(); | |
4338 HeapWord* end = hr->prev_top_at_mark_start(); | |
4339 size_t used_bytes = hr->used(); | |
4340 size_t live_bytes = hr->max_live_bytes(); | |
4341 if (used_bytes > 0) { | |
4342 guarantee( live_bytes <= used_bytes, "invariant" ); | |
4343 } else { | |
4344 guarantee( live_bytes == 0, "invariant" ); | |
4345 } | |
4346 | |
4347 size_t garbage_bytes = used_bytes - live_bytes; | |
4348 if (garbage_bytes > 0) | |
4349 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); | |
4350 | |
4351 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, | |
4352 &local_list); | |
4353 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
4354 &local_list); | |
4355 } | |
4356 | |
4357 void | |
4358 G1CollectedHeap::free_region_work(HeapRegion* hr, | |
4359 size_t& pre_used, | |
4360 size_t& cleared_h_regions, | |
4361 size_t& freed_regions, | |
4362 UncleanRegionList* list, | |
4363 bool par) { | |
4364 pre_used += hr->used(); | |
4365 if (hr->isHumongous()) { | |
4366 assert(hr->startsHumongous(), | |
4367 "Only the start of a humongous region should be freed."); | |
4368 int ind = _hrs->find(hr); | |
4369 assert(ind != -1, "Should have an index."); | |
4370 // Clear the start region. | |
4371 hr->hr_clear(par, true /*clear_space*/); | |
4372 list->insert_before_head(hr); | |
4373 cleared_h_regions++; | |
4374 freed_regions++; | |
4375 // Clear any continued regions. | |
4376 ind++; | |
4377 while ((size_t)ind < n_regions()) { | |
4378 HeapRegion* hrc = _hrs->at(ind); | |
4379 if (!hrc->continuesHumongous()) break; | |
4380 // Otherwise, does continue the H region. | |
4381 assert(hrc->humongous_start_region() == hr, "Huh?"); | |
4382 hrc->hr_clear(par, true /*clear_space*/); | |
4383 cleared_h_regions++; | |
4384 freed_regions++; | |
4385 list->insert_before_head(hrc); | |
4386 ind++; | |
4387 } | |
4388 } else { | |
4389 hr->hr_clear(par, true /*clear_space*/); | |
4390 list->insert_before_head(hr); | |
4391 freed_regions++; | |
4392 // If we're using clear2, this should not be enabled. | |
4393 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); | |
4394 } | |
4395 } | |
4396 | |
4397 void G1CollectedHeap::finish_free_region_work(size_t pre_used, | |
4398 size_t cleared_h_regions, | |
4399 size_t freed_regions, | |
4400 UncleanRegionList* list) { | |
4401 if (list != NULL && list->sz() > 0) { | |
4402 prepend_region_list_on_unclean_list(list); | |
4403 } | |
4404 // Acquire a lock, if we're parallel, to update possibly-shared | |
4405 // variables. | |
4406 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; | |
4407 { | |
4408 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
4409 _summary_bytes_used -= pre_used; | |
4410 _num_humongous_regions -= (int) cleared_h_regions; | |
4411 _free_regions += freed_regions; | |
4412 } | |
4413 } | |
4414 | |
4415 | |
4416 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4417 while (list != NULL) { | |
4418 guarantee( list->is_young(), "invariant" ); | |
4419 | |
4420 HeapWord* bottom = list->bottom(); | |
4421 HeapWord* end = list->end(); | |
4422 MemRegion mr(bottom, end); | |
4423 ct_bs->dirty(mr); | |
4424 | |
4425 list = list->get_next_young_region(); | |
4426 } | |
4427 } | |
4428 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4429 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4430 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4431 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4432 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4433 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4434 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4435 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4436 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4437 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4438 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4439 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4440 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4441 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4442 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4443 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4444 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4445 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4446 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4447 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4448 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4449 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4450 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4451 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4452 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4453 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4454 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4455 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4456 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4457 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4458 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4459 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4460 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4461 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4462 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4463 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4464 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4465 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4466 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4467 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4468 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4469 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4470 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4471 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4472 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4473 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4474 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4475 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4476 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4477 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4478 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4479 class G1VerifyCardTableCleanup: public HeapRegionClosure { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4480 CardTableModRefBS* _ct_bs; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4481 public: |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4482 G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4483 : _ct_bs(ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4484 { } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4485 virtual bool doHeapRegion(HeapRegion* r) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4486 { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4487 MemRegion mr(r->bottom(), r->end()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4488 if (r->is_survivor()) { |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4489 _ct_bs->verify_dirty_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4490 } else { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4491 _ct_bs->verify_clean_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4492 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4493 return false; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4494 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4495 }; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4496 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4497 |
// Clear the card-table marks made dirty during the collection. The
// regions to clear are those on the dirty cards region list; in the
// serial path the survivor regions' cards are then explicitly
// re-dirtied afterwards, while the parallel task is handed the first
// survivor region so it can do the equivalent itself.
void G1CollectedHeap::cleanUpCardTable() {
  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  double start = os::elapsedTime();

  // Iterate over the dirty cards region list.
  G1ParCleanupCTTask cleanup_task(ct_bs, this,
                                  _young_list->first_survivor_region());

  if (ParallelGCThreads > 0) {
    // Parallel path: hand the cleanup work to the worker threads.
    set_par_threads(workers()->total_workers());
    workers()->run_task(&cleanup_task);
    set_par_threads(0);
  } else {
    // Serial path: walk the dirty cards region list ourselves,
    // clearing each region's cards and unlinking it as we go.
    while (_dirty_cards_region_list) {
      HeapRegion* r = _dirty_cards_region_list;
      cleanup_task.clear_cards(r);
      _dirty_cards_region_list = r->get_next_dirty_cards_region();
      if (_dirty_cards_region_list == r) {
        // The last region links to itself; detect that and terminate.
        _dirty_cards_region_list = NULL;
      }
      r->set_next_dirty_cards_region(NULL);
    }
    // now, redirty the cards of the survivor regions
    // (it seemed faster to do it this way, instead of iterating over
    // all regions and then clearing / dirtying as appropriate)
    dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
  }

  // Report the elapsed time (in ms) to the policy for pause accounting.
  double elapsed = os::elapsedTime() - start;
  g1_policy()->record_clear_ct_time( elapsed * 1000.0);
#ifndef PRODUCT
  // Optionally verify that the cleanup left the card table clean.
  if (G1VerifyCTCleanup || VerifyAfterGC) {
    G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
    heap_region_iterate(&cleanup_verifier);
  }
#endif
}
4536 | |
4537 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { | |
4538 if (g1_policy()->should_do_collection_pause(word_size)) { | |
4539 do_collection_pause(); | |
4540 } | |
4541 } | |
4542 | |
4543 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
4544 double young_time_ms = 0.0; | |
4545 double non_young_time_ms = 0.0; | |
4546 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4547 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4548 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4549 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4550 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4551 |
342 | 4552 G1CollectorPolicy* policy = g1_policy(); |
4553 | |
4554 double start_sec = os::elapsedTime(); | |
4555 bool non_young = true; | |
4556 | |
4557 HeapRegion* cur = cs_head; | |
4558 int age_bound = -1; | |
4559 size_t rs_lengths = 0; | |
4560 | |
4561 while (cur != NULL) { | |
4562 if (non_young) { | |
4563 if (cur->is_young()) { | |
4564 double end_sec = os::elapsedTime(); | |
4565 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4566 non_young_time_ms += elapsed_ms; | |
4567 | |
4568 start_sec = os::elapsedTime(); | |
4569 non_young = false; | |
4570 } | |
4571 } else { | |
4572 if (!cur->is_on_free_list()) { | |
4573 double end_sec = os::elapsedTime(); | |
4574 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4575 young_time_ms += elapsed_ms; | |
4576 | |
4577 start_sec = os::elapsedTime(); | |
4578 non_young = true; | |
4579 } | |
4580 } | |
4581 | |
4582 rs_lengths += cur->rem_set()->occupied(); | |
4583 | |
4584 HeapRegion* next = cur->next_in_collection_set(); | |
4585 assert(cur->in_collection_set(), "bad CS"); | |
4586 cur->set_next_in_collection_set(NULL); | |
4587 cur->set_in_collection_set(false); | |
4588 | |
4589 if (cur->is_young()) { | |
4590 int index = cur->young_index_in_cset(); | |
4591 guarantee( index != -1, "invariant" ); | |
4592 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
4593 size_t words_survived = _surviving_young_words[index]; | |
4594 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4595 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4596 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4597 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4598 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4599 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4600 cur->set_next_young_region(NULL); |
342 | 4601 } else { |
4602 int index = cur->young_index_in_cset(); | |
4603 guarantee( index == -1, "invariant" ); | |
4604 } | |
4605 | |
4606 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
4607 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
4608 "invariant" ); | |
4609 | |
4610 if (!cur->evacuation_failed()) { | |
4611 // And the region is empty. | |
4612 assert(!cur->is_empty(), | |
4613 "Should not have empty regions in a CS."); | |
4614 free_region(cur); | |
4615 } else { | |
4616 cur->uninstall_surv_rate_group(); | |
4617 if (cur->is_young()) | |
4618 cur->set_young_index_in_cset(-1); | |
4619 cur->set_not_young(); | |
4620 cur->set_evacuation_failed(false); | |
4621 } | |
4622 cur = next; | |
4623 } | |
4624 | |
4625 policy->record_max_rs_lengths(rs_lengths); | |
4626 policy->cset_regions_freed(); | |
4627 | |
4628 double end_sec = os::elapsedTime(); | |
4629 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4630 if (non_young) | |
4631 non_young_time_ms += elapsed_ms; | |
4632 else | |
4633 young_time_ms += elapsed_ms; | |
4634 | |
4635 policy->record_young_free_cset_time_ms(young_time_ms); | |
4636 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
4637 } | |
4638 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4639 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4640 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4641 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4642 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4643 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4644 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4645 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4646 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4647 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4648 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4649 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4650 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4651 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4652 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4653 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4654 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4655 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4656 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4657 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4658 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4659 |
// Pop a region off the unclean list and, if "zero_filled" is true,
// ensure it is zero filled before returning it. Returns NULL if the
// list is empty. Caller must hold ZF_mon.
HeapRegion*
G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
  assert(ZF_mon->owned_by_self(), "Precondition");
  HeapRegion* res = pop_unclean_region_list_locked();
  if (res != NULL) {
    assert(!res->continuesHumongous() &&
           res->zero_fill_state() != HeapRegion::Allocated,
           "Only free regions on unclean list.");
    if (zero_filled) {
      // Complete (or wait for) zero filling, then mark the region as
      // handed out for allocation.
      res->ensure_zero_filled_locked();
      res->set_zero_fill_allocated();
    }
  }
  return res;
}
4675 | |
4676 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { | |
4677 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4678 return alloc_region_from_unclean_list_locked(zero_filled); | |
4679 } | |
4680 | |
4681 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { | |
4682 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4683 put_region_on_unclean_list_locked(r); | |
4684 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4685 } | |
4686 | |
4687 void G1CollectedHeap::set_unclean_regions_coming(bool b) { | |
4688 MutexLockerEx x(Cleanup_mon); | |
4689 set_unclean_regions_coming_locked(b); | |
4690 } | |
4691 | |
// Set the "unclean regions coming" flag; caller must hold Cleanup_mon.
// Clearing the flag releases any threads blocked in
// wait_for_cleanup_complete_locked().
void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
  assert(Cleanup_mon->owned_by_self(), "Precondition");
  _unclean_regions_coming = b;
  // Wake up mutator threads that might be waiting for completeCleanup to
  // finish.
  if (!b) Cleanup_mon->notify_all();
}
4699 | |
4700 void G1CollectedHeap::wait_for_cleanup_complete() { | |
4701 MutexLockerEx x(Cleanup_mon); | |
4702 wait_for_cleanup_complete_locked(); | |
4703 } | |
4704 | |
// Wait on Cleanup_mon until _unclean_regions_coming is cleared (by
// set_unclean_regions_coming_locked(false)). Caller must already hold
// Cleanup_mon.
void G1CollectedHeap::wait_for_cleanup_complete_locked() {
  assert(Cleanup_mon->owned_by_self(), "precondition");
  while (_unclean_regions_coming) {
    Cleanup_mon->wait();
  }
}
4711 | |
// Insert "r" at the head of the unclean region list. Caller must hold
// ZF_mon.
void
G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
  assert(ZF_mon->owned_by_self(), "precondition.");
#ifdef ASSERT
  // A GC alloc region must never be put on the unclean list. The
  // guard plus the (then always-failing) assert below exists only so
  // the failure message can include a printout of the heap.
  if (r->is_gc_alloc_region()) {
    ResourceMark rm;
    stringStream region_str;
    print_on(&region_str);
    assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s",
                                             region_str.as_string()));
  }
#endif
  _unclean_region_list.insert_before_head(r);
}
4726 | |
4727 void | |
4728 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { | |
4729 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4730 prepend_region_list_on_unclean_list_locked(list); | |
4731 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4732 } | |
4733 | |
// Splice "list" onto the front of the unclean region list. Caller
// must hold ZF_mon.
void
G1CollectedHeap::
prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
  assert(ZF_mon->owned_by_self(), "precondition.");
  _unclean_region_list.prepend_list(list);
}
4740 | |
// Remove and return the head of the unclean region list, or NULL if
// the list is empty. Caller must hold ZF_mon. If another region
// becomes the head and zero-filling is wanted, the ZF thread is
// notified so it can start on the new head.
HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  HeapRegion* res = _unclean_region_list.pop();
  if (res != NULL) {
    // Inform ZF thread that there's a new unclean head.
    if (_unclean_region_list.hd() != NULL && should_zf())
      ZF_mon->notify_all();
  }
  return res;
}
4751 | |
// Return (without removing) the head of the unclean region list, or
// NULL if the list is empty. Caller must hold ZF_mon.
HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  return _unclean_region_list.hd();
}
4756 | |
4757 | |
4758 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { | |
4759 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4760 HeapRegion* r = peek_unclean_region_list_locked(); | |
4761 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { | |
4762 // Result of below must be equal to "r", since we hold the lock. | |
4763 (void)pop_unclean_region_list_locked(); | |
4764 put_free_region_on_list_locked(r); | |
4765 return true; | |
4766 } else { | |
4767 return false; | |
4768 } | |
4769 } | |
4770 | |
4771 bool G1CollectedHeap::move_cleaned_region_to_free_list() { | |
4772 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4773 return move_cleaned_region_to_free_list_locked(); | |
4774 } | |
4775 | |
4776 | |
// Push "r" onto the free region list and bump the cached size
// counter. The region must be zero filled, empty, non-humongous, and
// on neither list yet. Caller must hold ZF_mon.
void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
  assert(ZF_mon->owned_by_self(), "precondition.");
  assert(_free_region_list_size == free_region_list_length(), "Inv");
  assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
         "Regions on free list must be zero filled");
  assert(!r->isHumongous(), "Must not be humongous.");
  assert(r->is_empty(), "Better be empty");
  assert(!r->is_on_free_list(),
         "Better not already be on free list");
  assert(!r->is_on_unclean_list(),
         "Better not already be on unclean list");
  // Link at the head and keep the cached length in sync.
  r->set_on_free_list(true);
  r->set_next_on_free_list(_free_region_list);
  _free_region_list = r;
  _free_region_list_size++;
  assert(_free_region_list_size == free_region_list_length(), "Inv");
}
4794 | |
4795 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { | |
4796 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4797 put_free_region_on_list_locked(r); | |
4798 } | |
4799 | |
// Remove and return the head of the free region list, or NULL if the
// list is empty, keeping _free_region_list_size in sync. Caller must
// hold ZF_mon.
HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  assert(_free_region_list_size == free_region_list_length(), "Inv");
  HeapRegion* res = _free_region_list;
  if (res != NULL) {
    _free_region_list = res->next_from_free_list();
    _free_region_list_size--;
    res->set_on_free_list(false);
    res->set_next_on_free_list(NULL);
    assert(_free_region_list_size == free_region_list_length(), "Inv");
  }
  return res;
}
4813 | |
4814 | |
// Allocate a region from the free list or, failing that, the unclean
// list. When "zero_filled" is false, the unclean list is preferred on
// the first pass (the free list is only consulted from the second
// pass on). Returns NULL once both lists have been tried and are
// empty. Requires the Heap_lock.
HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
  // By self, or on behalf of self.
  assert(Heap_lock->is_locked(), "Precondition");
  HeapRegion* res = NULL;
  bool first = true;
  while (res == NULL) {
    // Try the free list only when a zero-filled region is required,
    // or on the retry pass.
    if (zero_filled || !first) {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
      res = pop_free_region_list_locked();
      if (res != NULL) {
        assert(!res->zero_fill_is_allocated(),
               "No allocated regions on free list.");
        res->set_zero_fill_allocated();
      } else if (!first) {
        break;  // We tried both, time to return NULL.
      }
    }

    // Fall back to the unclean list.
    if (res == NULL) {
      res = alloc_region_from_unclean_list(zero_filled);
    }
    assert(res == NULL ||
           !zero_filled ||
           res->zero_fill_is_allocated(),
           "We must have allocated the region we're returning");
    first = false;
  }
  return res;
}
4844 | |
// Walk both the unclean and the free list and unlink every region
// whose zero-fill state is Allocated, i.e. regions that have been
// handed out and must no longer be considered available.
void G1CollectedHeap::remove_allocated_regions_from_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  {
    // Unclean list: unlink allocated regions in a single pass.
    HeapRegion* prev = NULL;
    HeapRegion* cur = _unclean_region_list.hd();
    while (cur != NULL) {
      HeapRegion* next = cur->next_from_unclean_list();
      if (cur->zero_fill_is_allocated()) {
        // Remove from the list.
        if (prev == NULL) {
          (void)_unclean_region_list.pop();
        } else {
          _unclean_region_list.delete_after(prev);
        }
        cur->set_on_unclean_list(false);
        cur->set_next_on_unclean_list(NULL);
      } else {
        prev = cur;
      }
      cur = next;
    }
    assert(_unclean_region_list.sz() == unclean_region_list_length(),
           "Inv");
  }

  {
    // Free list: same unlink pass, also maintaining the cached size.
    HeapRegion* prev = NULL;
    HeapRegion* cur = _free_region_list;
    while (cur != NULL) {
      HeapRegion* next = cur->next_from_free_list();
      if (cur->zero_fill_is_allocated()) {
        // Remove from the list.
        if (prev == NULL) {
          _free_region_list = cur->next_from_free_list();
        } else {
          prev->set_next_on_free_list(cur->next_from_free_list());
        }
        cur->set_on_free_list(false);
        cur->set_next_on_free_list(NULL);
        _free_region_list_size--;
      } else {
        prev = cur;
      }
      cur = next;
    }
    assert(_free_region_list_size == free_region_list_length(), "Inv");
  }
}
4893 | |
4894 bool G1CollectedHeap::verify_region_lists() { | |
4895 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4896 return verify_region_lists_locked(); | |
4897 } | |
4898 | |
// Verify the invariants of both region lists: membership flags, that
// no region is on both lists, the permitted zero-fill states, and
// that the cached length/size counters match an actual walk. Caller
// must hold ZF_mon. Always returns true (failures fire guarantees).
bool G1CollectedHeap::verify_region_lists_locked() {
  HeapRegion* unclean = _unclean_region_list.hd();
  while (unclean != NULL) {
    guarantee(unclean->is_on_unclean_list(), "Well, it is!");
    guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
    guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
              "Everything else is possible.");
    unclean = unclean->next_from_unclean_list();
  }
  guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");

  HeapRegion* free_r = _free_region_list;
  while (free_r != NULL) {
    assert(free_r->is_on_free_list(), "Well, it is!");
    assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
    // Free-list regions must be at least partially zero filled.
    switch (free_r->zero_fill_state()) {
    case HeapRegion::NotZeroFilled:
    case HeapRegion::ZeroFilling:
      guarantee(false, "Should not be on free list.");
      break;
    default:
      // Everything else is possible.
      break;
    }
    free_r = free_r->next_from_free_list();
  }
  guarantee(_free_region_list_size == free_region_list_length(), "Inv");
  // If we didn't do an assertion...
  return true;
}
4929 | |
4930 size_t G1CollectedHeap::free_region_list_length() { | |
4931 assert(ZF_mon->owned_by_self(), "precondition."); | |
4932 size_t len = 0; | |
4933 HeapRegion* cur = _free_region_list; | |
4934 while (cur != NULL) { | |
4935 len++; | |
4936 cur = cur->next_from_free_list(); | |
4937 } | |
4938 return len; | |
4939 } | |
4940 | |
// Length of the unclean region list, as maintained by the list
// itself. Caller must hold ZF_mon.
size_t G1CollectedHeap::unclean_region_list_length() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  return _unclean_region_list.length();
}
4945 | |
// Number of regions in the heap region sequence.
size_t G1CollectedHeap::n_regions() {
  return _hrs->length();
}
4949 | |
// Maximum number of regions the reserved heap space can hold
// (reserved bytes rounded up to a whole number of regions).
size_t G1CollectedHeap::max_regions() {
  return
    (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
    HeapRegion::GrainBytes;
}
4955 | |
// Cached count of free regions (see count_free_regions() for the
// authoritative, but expensive, heap walk).
size_t G1CollectedHeap::free_regions() {
  /* Possibly-expensive assert.
  assert(_free_regions == count_free_regions(),
  "_free_regions is off.");
  */
  return _free_regions;
}
4963 | |
// Whether the concurrent zero-fill thread should keep working: only
// while the free list is below the G1ConcZFMaxRegions threshold.
bool G1CollectedHeap::should_zf() {
  return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
}
4967 | |
4968 class RegionCounter: public HeapRegionClosure { | |
4969 size_t _n; | |
4970 public: | |
4971 RegionCounter() : _n(0) {} | |
4972 bool doHeapRegion(HeapRegion* r) { | |
677 | 4973 if (r->is_empty()) { |
342 | 4974 assert(!r->isHumongous(), "H regions should not be empty."); |
4975 _n++; | |
4976 } | |
4977 return false; | |
4978 } | |
4979 int res() { return (int) _n; } | |
4980 }; | |
4981 | |
4982 size_t G1CollectedHeap::count_free_regions() { | |
4983 RegionCounter rc; | |
4984 heap_region_iterate(&rc); | |
4985 size_t n = rc.res(); | |
4986 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) | |
4987 n--; | |
4988 return n; | |
4989 } | |
4990 | |
4991 size_t G1CollectedHeap::count_free_regions_list() { | |
4992 size_t n = 0; | |
4993 size_t o = 0; | |
4994 ZF_mon->lock_without_safepoint_check(); | |
4995 HeapRegion* cur = _free_region_list; | |
4996 while (cur != NULL) { | |
4997 cur = cur->next_from_free_list(); | |
4998 n++; | |
4999 } | |
5000 size_t m = unclean_region_list_length(); | |
5001 ZF_mon->unlock(); | |
5002 return n + m; | |
5003 } | |
5004 | |
// Whether the next allocated region should be added to the young
// list, per the policy. Requires the heap lock.
bool G1CollectedHeap::should_set_young_locked() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  return  (g1_policy()->in_young_gc_mode() &&
           g1_policy()->should_add_next_region_to_young_list());
}
5011 | |
// Add "hr" to the young list and tell the policy it is short lived.
// Requires the heap lock.
void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  _young_list->push_region(hr);
  g1_policy()->set_region_short_lived(hr);
}
5018 | |
// Closure that checks no visited region is tagged young; any offender
// is logged and recorded as a failure (queried via success()).
class NoYoungRegionsClosure: public HeapRegionClosure {
private:
  bool _success;
public:
  NoYoungRegionsClosure() : _success(true) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_young()) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
                             r->bottom(), r->end());
      _success = false;
    }
    return false;
  }
  bool success() { return _success; }
};
5034 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5035 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5036 bool ret = _young_list->check_list_empty(check_sample); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5037 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5038 if (check_heap) { |
342 | 5039 NoYoungRegionsClosure closure; |
5040 heap_region_iterate(&closure); | |
5041 ret = ret && closure.success(); | |
5042 } | |
5043 | |
5044 return ret; | |
5045 } | |
5046 | |
// Drop all regions from the young list. Requires the heap lock and
// young GC mode.
void G1CollectedHeap::empty_young_list() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");

  _young_list->empty_list();
}
5054 | |
5055 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5056 bool no_allocs = true; | |
5057 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5058 HeapRegion* r = _gc_alloc_regions[ap]; | |
5059 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5060 } | |
5061 return no_allocs; | |
5062 } | |
5063 | |
// Retire every GC alloc region, making sure a region that is aliased
// across several purposes is retired only once.
void G1CollectedHeap::retire_all_alloc_regions() {
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    if (r != NULL) {
      // Check for aliases: the same region may appear in an earlier
      // slot, in which case it has already been retired.
      bool has_processed_alias = false;
      for (int i = 0; i < ap; ++i) {
        if (_gc_alloc_regions[i] == r) {
          has_processed_alias = true;
          break;
        }
      }
      if (!has_processed_alias) {
        retire_alloc_region(r, false /* par */);
      }
    }
  }
}
5082 | |
5083 | |
5084 // Done at the start of full GC. | |
5085 void G1CollectedHeap::tear_down_region_lists() { | |
5086 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5087 while (pop_unclean_region_list_locked() != NULL) ; | |
5088 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, | |
1489
cff162798819
6888953: some calls to function-like macros are missing semicolons
jcoomes
parents:
1394
diff
changeset
|
5089 "Postconditions of loop."); |
342 | 5090 while (pop_free_region_list_locked() != NULL) ; |
5091 assert(_free_region_list == NULL, "Postcondition of loop."); | |
5092 if (_free_region_list_size != 0) { | |
5093 gclog_or_tty->print_cr("Size is %d.", _free_region_list_size); | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
5094 print_on(gclog_or_tty, true /* extended */); |
342 | 5095 } |
5096 assert(_free_region_list_size == 0, "Postconditions of loop."); | |
5097 } | |
5098 | |
5099 | |
// Closure used after a full GC to restore the region-list state:
// used regions have their unused tail filled and are marked
// zero-fill-allocated; empty regions are counted and put back on the
// unclean or free list according to their zero-fill state.
class RegionResetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;  // number of empty regions seen
public:
  RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      // Used region: fill the unused tail words, then mark the region
      // as allocated with respect to zero filling.
      if (r->top() < r->end()) {
        Copy::fill_to_words(r->top(),
                            pointer_delta(r->end(), r->top()));
      }
      r->set_zero_fill_allocated();
    } else {
      assert(r->is_empty(), "tautology");
      _n++;
      switch (r->zero_fill_state()) {
        case HeapRegion::NotZeroFilled:
        case HeapRegion::ZeroFilling:
          _g1->put_region_on_unclean_list_locked(r);
          break;
        case HeapRegion::Allocated:
          r->set_zero_fill_complete();
          // no break; go on to put on free list.
        case HeapRegion::ZeroFilled:
          _g1->put_free_region_on_list_locked(r);
          break;
      }
    }
    return false;
  }

  // Number of empty (free) regions encountered during the iteration.
  int getFreeRegionCount() {return _n;}
};
5134 | |
5135 // Done at the end of full GC. | |
5136 void G1CollectedHeap::rebuild_region_lists() { | |
5137 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5138 // This needs to go at the end of the full GC. | |
5139 RegionResetter rs; | |
5140 heap_region_iterate(&rs); | |
5141 _free_regions = rs.getFreeRegionCount(); | |
5142 // Tell the ZF thread it may have work to do. | |
5143 if (should_zf()) ZF_mon->notify_all(); | |
5144 } | |
5145 | |
5146 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { | |
5147 G1CollectedHeap* _g1; | |
5148 int _n; | |
5149 public: | |
5150 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5151 bool doHeapRegion(HeapRegion* r) { | |
5152 if (r->continuesHumongous()) return false; | |
5153 if (r->top() > r->bottom()) { | |
5154 // There are assertions in "set_zero_fill_needed()" below that | |
5155 // require top() == bottom(), so this is technically illegal. | |
5156 // We'll skirt the law here, by making that true temporarily. | |
5157 DEBUG_ONLY(HeapWord* save_top = r->top(); | |
5158 r->set_top(r->bottom())); | |
5159 r->set_zero_fill_needed(); | |
5160 DEBUG_ONLY(r->set_top(save_top)); | |
5161 } | |
5162 return false; | |
5163 } | |
5164 }; | |
5165 | |
// Done at the start of full GC.
void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // Flag all used regions as needing zero-fill before compaction begins.
  // (The original comment here said "end of the full GC" — a copy-paste
  // from rebuild_region_lists; this routine runs at the START, per above.)
  UsedRegionsNeedZeroFillSetter rs;
  heap_region_iterate(&rs);
}
5173 | |
// Forward the concurrency mode to the refinement card-table-entry closure.
void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  _refine_cte_cl->set_concurrent(concurrent);
}
5177 | |
5178 #ifndef PRODUCT | |
5179 | |
5180 class PrintHeapRegionClosure: public HeapRegionClosure { | |
5181 public: | |
5182 bool doHeapRegion(HeapRegion *r) { | |
5183 gclog_or_tty->print("Region: "PTR_FORMAT":", r); | |
5184 if (r != NULL) { | |
5185 if (r->is_on_free_list()) | |
5186 gclog_or_tty->print("Free "); | |
5187 if (r->is_young()) | |
5188 gclog_or_tty->print("Young "); | |
5189 if (r->isHumongous()) | |
5190 gclog_or_tty->print("Is Humongous "); | |
5191 r->print(); | |
5192 } | |
5193 return false; | |
5194 } | |
5195 }; | |
5196 | |
5197 class SortHeapRegionClosure : public HeapRegionClosure { | |
5198 size_t young_regions,free_regions, unclean_regions; | |
5199 size_t hum_regions, count; | |
5200 size_t unaccounted, cur_unclean, cur_alloc; | |
5201 size_t total_free; | |
5202 HeapRegion* cur; | |
5203 public: | |
5204 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), | |
5205 free_regions(0), unclean_regions(0), | |
5206 hum_regions(0), | |
5207 count(0), unaccounted(0), | |
5208 cur_alloc(0), total_free(0) | |
5209 {} | |
5210 bool doHeapRegion(HeapRegion *r) { | |
5211 count++; | |
5212 if (r->is_on_free_list()) free_regions++; | |
5213 else if (r->is_on_unclean_list()) unclean_regions++; | |
5214 else if (r->isHumongous()) hum_regions++; | |
5215 else if (r->is_young()) young_regions++; | |
5216 else if (r == cur) cur_alloc++; | |
5217 else unaccounted++; | |
5218 return false; | |
5219 } | |
5220 void print() { | |
5221 total_free = free_regions + unclean_regions; | |
5222 gclog_or_tty->print("%d regions\n", count); | |
5223 gclog_or_tty->print("%d free: free_list = %d unclean = %d\n", | |
5224 total_free, free_regions, unclean_regions); | |
5225 gclog_or_tty->print("%d humongous %d young\n", | |
5226 hum_regions, young_regions); | |
5227 gclog_or_tty->print("%d cur_alloc\n", cur_alloc); | |
5228 gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted); | |
5229 } | |
5230 }; | |
5231 | |
5232 void G1CollectedHeap::print_region_counts() { | |
5233 SortHeapRegionClosure sc(_cur_alloc_region); | |
5234 PrintHeapRegionClosure cl; | |
5235 heap_region_iterate(&cl); | |
5236 heap_region_iterate(&sc); | |
5237 sc.print(); | |
5238 print_region_accounting_info(); | |
5239 }; | |
5240 | |
// Debug-only consistency check.  Currently a stub that always reports the
// region accounting as consistent.
bool G1CollectedHeap::regions_accounted_for() {
  // TODO: regions accounting for young/survivor/tenured
  return true;
}
5245 | |
// Debug-only: dump the free/unclean list sizes and related counters.
// Always returns true (paired with regions_accounted_for above).
bool G1CollectedHeap::print_region_accounting_info() {
  // NOTE(review): several of these counters (free_regions(),
  // _free_region_list_size, _unclean_region_list.sz()) are likely size_t,
  // in which case "%d" truncates on LP64 — confirm the declared types and
  // consider SIZE_FORMAT.
  gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
                         free_regions(),
                         count_free_regions(), count_free_regions_list(),
                         _free_region_list_size, _unclean_region_list.sz());
  gclog_or_tty->print_cr("cur_alloc: %d.",
                         (_cur_alloc_region == NULL ? 0 : 1));
  gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);

  // TODO: check regions accounting for young/survivor/tenured
  return true;
}
5258 | |
5259 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5260 HeapRegion* hr = heap_region_containing(p); | |
5261 if (hr == NULL) { | |
5262 return is_in_permanent(p); | |
5263 } else { | |
5264 return hr->is_in(p); | |
5265 } | |
5266 } | |
941 | 5267 #endif // !PRODUCT |
342 | 5268 |
// Placeholder for not-yet-implemented G1 entry points.  Deliberately a
// no-op: the Unimplemented() guarantee is commented out.
void G1CollectedHeap::g1_unimplemented() {
  // Unimplemented();
}