Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 1705:2d160770d2e5
6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp
author | johnc |
---|---|
date | Mon, 02 Aug 2010 12:51:43 -0700 |
parents | 5cbac8938c4c |
children | 0ce1569c90e5 |
rev | line source |
---|---|
342 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_g1CollectedHeap.cpp.incl" | |
27 | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
28 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
29 |
342 | 30 // turn it on so that the contents of the young list (scan-only / |
31 // to-be-collected) are printed at "strategic" points before / during | |
32 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
33 #define YOUNG_LIST_VERBOSE 0 |
342 | 34 // CURRENT STATUS |
35 // This file is under construction. Search for "FIXME". | |
36 | |
37 // INVARIANTS/NOTES | |
38 // | |
39 // All allocation activity covered by the G1CollectedHeap interface is | |
40 // serialized by acquiring the HeapLock. This happens in | |
41 // mem_allocate_work, which all such allocation functions call. | |
42 // (Note that this does not apply to TLAB allocation, which is not part | |
43 // of this interface: it is done by clients of this interface.) | |
44 | |
45 // Local to this file. | |
46 | |
47 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
48 SuspendibleThreadSet* _sts; | |
49 G1RemSet* _g1rs; | |
50 ConcurrentG1Refine* _cg1r; | |
51 bool _concurrent; | |
52 public: | |
53 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
54 G1RemSet* g1rs, | |
55 ConcurrentG1Refine* cg1r) : | |
56 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
57 {} | |
58 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
1705 | 59 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); |
60 // This path is executed by the concurrent refine or mutator threads, | |
61 // concurrently, and so we do not care if card_ptr contains references | |
62 // that point into the collection set. | |
63 assert(!oops_into_cset, "should be"); | |
64 | |
342 | 65 if (_concurrent && _sts->should_yield()) { |
66 // Caller will actually yield. | |
67 return false; | |
68 } | |
69 // Otherwise, we finished successfully; return true. | |
70 return true; | |
71 } | |
72 void set_concurrent(bool b) { _concurrent = b; } | |
73 }; | |
74 | |
75 | |
76 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
77 int _calls; | |
78 G1CollectedHeap* _g1h; | |
79 CardTableModRefBS* _ctbs; | |
80 int _histo[256]; | |
81 public: | |
82 ClearLoggedCardTableEntryClosure() : | |
83 _calls(0) | |
84 { | |
85 _g1h = G1CollectedHeap::heap(); | |
86 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
87 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
88 } | |
89 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
90 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
91 _calls++; | |
92 unsigned char* ujb = (unsigned char*)card_ptr; | |
93 int ind = (int)(*ujb); | |
94 _histo[ind]++; | |
95 *card_ptr = -1; | |
96 } | |
97 return true; | |
98 } | |
99 int calls() { return _calls; } | |
100 void print_histo() { | |
101 gclog_or_tty->print_cr("Card table value histogram:"); | |
102 for (int i = 0; i < 256; i++) { | |
103 if (_histo[i] != 0) { | |
104 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
105 } | |
106 } | |
107 } | |
108 }; | |
109 | |
110 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
111 int _calls; | |
112 G1CollectedHeap* _g1h; | |
113 CardTableModRefBS* _ctbs; | |
114 public: | |
115 RedirtyLoggedCardTableEntryClosure() : | |
116 _calls(0) | |
117 { | |
118 _g1h = G1CollectedHeap::heap(); | |
119 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
120 } | |
121 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
122 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
123 _calls++; | |
124 *card_ptr = 0; | |
125 } | |
126 return true; | |
127 } | |
128 int calls() { return _calls; } | |
129 }; | |
130 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
131 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
132 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
133 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
134 *card_ptr = CardTableModRefBS::dirty_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
135 return true; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
136 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
137 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
138 |
342 | 139 YoungList::YoungList(G1CollectedHeap* g1h) |
140 : _g1h(g1h), _head(NULL), | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
141 _length(0), |
342 | 142 _last_sampled_rs_lengths(0), |
545 | 143 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
342 | 144 { |
145 guarantee( check_list_empty(false), "just making sure..." ); | |
146 } | |
147 | |
148 void YoungList::push_region(HeapRegion *hr) { | |
149 assert(!hr->is_young(), "should not already be young"); | |
150 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
151 | |
152 hr->set_next_young_region(_head); | |
153 _head = hr; | |
154 | |
155 hr->set_young(); | |
156 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
157 ++_length; | |
158 } | |
159 | |
160 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 161 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 162 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
163 | |
164 hr->set_next_young_region(_survivor_head); | |
165 if (_survivor_head == NULL) { | |
545 | 166 _survivor_tail = hr; |
342 | 167 } |
168 _survivor_head = hr; | |
169 | |
170 ++_survivor_length; | |
171 } | |
172 | |
173 void YoungList::empty_list(HeapRegion* list) { | |
174 while (list != NULL) { | |
175 HeapRegion* next = list->get_next_young_region(); | |
176 list->set_next_young_region(NULL); | |
177 list->uninstall_surv_rate_group(); | |
178 list->set_not_young(); | |
179 list = next; | |
180 } | |
181 } | |
182 | |
183 void YoungList::empty_list() { | |
184 assert(check_list_well_formed(), "young list should be well formed"); | |
185 | |
186 empty_list(_head); | |
187 _head = NULL; | |
188 _length = 0; | |
189 | |
190 empty_list(_survivor_head); | |
191 _survivor_head = NULL; | |
545 | 192 _survivor_tail = NULL; |
342 | 193 _survivor_length = 0; |
194 | |
195 _last_sampled_rs_lengths = 0; | |
196 | |
197 assert(check_list_empty(false), "just making sure..."); | |
198 } | |
199 | |
200 bool YoungList::check_list_well_formed() { | |
201 bool ret = true; | |
202 | |
203 size_t length = 0; | |
204 HeapRegion* curr = _head; | |
205 HeapRegion* last = NULL; | |
206 while (curr != NULL) { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
207 if (!curr->is_young()) { |
342 | 208 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
209 "incorrectly tagged (y: %d, surv: %d)", |
342 | 210 curr->bottom(), curr->end(), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
211 curr->is_young(), curr->is_survivor()); |
342 | 212 ret = false; |
213 } | |
214 ++length; | |
215 last = curr; | |
216 curr = curr->get_next_young_region(); | |
217 } | |
218 ret = ret && (length == _length); | |
219 | |
220 if (!ret) { | |
221 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
222 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
223 length, _length); | |
224 } | |
225 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
226 return ret; |
342 | 227 } |
228 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
229 bool YoungList::check_list_empty(bool check_sample) { |
342 | 230 bool ret = true; |
231 | |
232 if (_length != 0) { | |
233 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
234 _length); | |
235 ret = false; | |
236 } | |
237 if (check_sample && _last_sampled_rs_lengths != 0) { | |
238 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
239 ret = false; | |
240 } | |
241 if (_head != NULL) { | |
242 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
243 ret = false; | |
244 } | |
245 if (!ret) { | |
246 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
247 } | |
248 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
249 return ret; |
342 | 250 } |
251 | |
252 void | |
253 YoungList::rs_length_sampling_init() { | |
254 _sampled_rs_lengths = 0; | |
255 _curr = _head; | |
256 } | |
257 | |
258 bool | |
259 YoungList::rs_length_sampling_more() { | |
260 return _curr != NULL; | |
261 } | |
262 | |
263 void | |
264 YoungList::rs_length_sampling_next() { | |
265 assert( _curr != NULL, "invariant" ); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
266 size_t rs_length = _curr->rem_set()->occupied(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
267 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
268 _sampled_rs_lengths += rs_length; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
269 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
270 // The current region may not yet have been added to the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
271 // incremental collection set (it gets added when it is |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
272 // retired as the current allocation region). |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
273 if (_curr->in_collection_set()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
274 // Update the collection set policy information for this region |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
275 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
276 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
277 |
342 | 278 _curr = _curr->get_next_young_region(); |
279 if (_curr == NULL) { | |
280 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
281 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
282 } | |
283 } | |
284 | |
285 void | |
286 YoungList::reset_auxilary_lists() { | |
287 guarantee( is_empty(), "young list should be empty" ); | |
288 assert(check_list_well_formed(), "young list should be well formed"); | |
289 | |
290 // Add survivor regions to SurvRateGroup. | |
291 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
545 | 292 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
293 |
342 | 294 for (HeapRegion* curr = _survivor_head; |
295 curr != NULL; | |
296 curr = curr->get_next_young_region()) { | |
297 _g1h->g1_policy()->set_region_survivors(curr); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
298 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
299 // The region is a non-empty survivor so let's add it to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
300 // the incremental collection set for the next evacuation |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
301 // pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
302 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); |
342 | 303 } |
304 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
305 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
306 _head = _survivor_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
307 _length = _survivor_length; |
342 | 308 if (_survivor_head != NULL) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
309 assert(_survivor_tail != NULL, "cause it shouldn't be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
310 assert(_survivor_length > 0, "invariant"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
311 _survivor_tail->set_next_young_region(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
312 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
313 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
314 // Don't clear the survivor list handles until the start of |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
315 // the next evacuation pause - we need it in order to re-tag |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
316 // the survivor regions from this evacuation pause as 'young' |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
317 // at the start of the next. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
318 |
545 | 319 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
342 | 320 |
321 assert(check_list_well_formed(), "young list should be well formed"); | |
322 } | |
323 | |
324 void YoungList::print() { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
325 HeapRegion* lists[] = {_head, _survivor_head}; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
326 const char* names[] = {"YOUNG", "SURVIVOR"}; |
342 | 327 |
328 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
329 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
330 HeapRegion *curr = lists[list]; | |
331 if (curr == NULL) | |
332 gclog_or_tty->print_cr(" empty"); | |
333 while (curr != NULL) { | |
334 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
335 "age: %4d, y: %d, surv: %d", |
342 | 336 curr->bottom(), curr->end(), |
337 curr->top(), | |
338 curr->prev_top_at_mark_start(), | |
339 curr->next_top_at_mark_start(), | |
340 curr->top_at_conc_mark_count(), | |
341 curr->age_in_surv_rate_group_cond(), | |
342 curr->is_young(), | |
343 curr->is_survivor()); | |
344 curr = curr->get_next_young_region(); | |
345 } | |
346 } | |
347 | |
348 gclog_or_tty->print_cr(""); | |
349 } | |
350 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
351 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
352 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
353 // Claim the right to put the region on the dirty cards region list |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
354 // by installing a self pointer. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
355 HeapRegion* next = hr->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
356 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
357 HeapRegion* res = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
358 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
359 NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
360 if (res == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
361 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
362 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
363 // Put the region to the dirty cards region list. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
364 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
365 next = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
366 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
367 if (next == head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
368 assert(hr->get_next_dirty_cards_region() == hr, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
369 "hr->get_next_dirty_cards_region() != hr"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
370 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
371 // The last region in the list points to itself. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
372 hr->set_next_dirty_cards_region(hr); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
373 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
374 hr->set_next_dirty_cards_region(next); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
375 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
376 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
377 } while (next != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
378 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
379 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
380 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
381 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
382 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
383 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
384 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
385 HeapRegion* hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
386 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
387 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
388 if (head == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
389 return NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
390 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
391 HeapRegion* new_head = head->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
392 if (head == new_head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
393 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
394 new_head = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
395 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
396 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
397 head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
398 } while (hr != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
399 assert(hr != NULL, "invariant"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
400 hr->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
401 return hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
402 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
403 |
342 | 404 void G1CollectedHeap::stop_conc_gc_threads() { |
794 | 405 _cg1r->stop(); |
342 | 406 _czft->stop(); |
407 _cmThread->stop(); | |
408 } | |
409 | |
410 | |
411 void G1CollectedHeap::check_ct_logs_at_safepoint() { | |
412 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
413 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
414 | |
415 // Count the dirty cards at the start. | |
416 CountNonCleanMemRegionClosure count1(this); | |
417 ct_bs->mod_card_iterate(&count1); | |
418 int orig_count = count1.n(); | |
419 | |
420 // First clear the logged cards. | |
421 ClearLoggedCardTableEntryClosure clear; | |
422 dcqs.set_closure(&clear); | |
423 dcqs.apply_closure_to_all_completed_buffers(); | |
424 dcqs.iterate_closure_all_threads(false); | |
425 clear.print_histo(); | |
426 | |
427 // Now ensure that there's no dirty cards. | |
428 CountNonCleanMemRegionClosure count2(this); | |
429 ct_bs->mod_card_iterate(&count2); | |
430 if (count2.n() != 0) { | |
431 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
432 count2.n(), orig_count); | |
433 } | |
434 guarantee(count2.n() == 0, "Card table should be clean."); | |
435 | |
436 RedirtyLoggedCardTableEntryClosure redirty; | |
437 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
438 dcqs.apply_closure_to_all_completed_buffers(); | |
439 dcqs.iterate_closure_all_threads(false); | |
440 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
441 clear.calls(), orig_count); | |
442 guarantee(redirty.calls() == clear.calls(), | |
443 "Or else mechanism is broken."); | |
444 | |
445 CountNonCleanMemRegionClosure count3(this); | |
446 ct_bs->mod_card_iterate(&count3); | |
447 if (count3.n() != orig_count) { | |
448 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
449 orig_count, count3.n()); | |
450 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
451 } | |
452 | |
453 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
454 } | |
455 | |
456 // Private class members. | |
457 | |
458 G1CollectedHeap* G1CollectedHeap::_g1h; | |
459 | |
460 // Private methods. | |
461 | |
462 // Finds a HeapRegion that can be used to allocate a given size of block. | |
463 | |
464 | |
// Allocate a free region from the free-region lists; if none is available
// and do_expand is true, expand the heap by word_size and retry.
// If zero_filled is true the caller requires a region whose zero-fill
// state is HeapRegion::Allocated (checked by the asserts below).
// Returns NULL if no region could be obtained.
HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
                                                 bool do_expand,
                                                 bool zero_filled) {
  // Bookkeeping hook for the concurrent zero-fill thread.
  ConcurrentZFThread::note_region_alloc();
  HeapRegion* res = alloc_free_region_from_lists(zero_filled);
  if (res == NULL && do_expand) {
    // No free region on hand: grow the heap and try the lists again.
    expand(word_size * HeapWordSize);
    res = alloc_free_region_from_lists(zero_filled);
    assert(res == NULL ||
           (!res->isHumongous() &&
            (!zero_filled ||
             res->zero_fill_state() == HeapRegion::Allocated)),
           "Alloc Regions must be zero filled (and non-H)");
  }
  if (res != NULL) {
    // An empty region handed out for allocation is no longer "free".
    if (res->is_empty()) {
      _free_regions--;
    }
    assert(!res->isHumongous() &&
           (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated),
           err_msg("Non-young alloc Regions must be zero filled (and non-H):"
                   " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d",
                   res->isHumongous(), zero_filled, res->zero_fill_state()));
    assert(!res->is_on_unclean_list(),
           "Alloc Regions must not be on the unclean list");
    if (G1PrintHeapRegions) {
      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                             "top "PTR_FORMAT,
                             res->hrs_index(), res->bottom(), res->end(), res->top());
    }
  }
  return res;
}
498 | |
499 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, | |
500 size_t word_size, | |
501 bool zero_filled) { | |
502 HeapRegion* alloc_region = NULL; | |
503 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
504 alloc_region = newAllocRegion_work(word_size, true, zero_filled); | |
505 if (purpose == GCAllocForSurvived && alloc_region != NULL) { | |
545 | 506 alloc_region->set_survivor(); |
342 | 507 } |
508 ++_gc_alloc_region_counts[purpose]; | |
509 } else { | |
510 g1_policy()->note_alloc_region_limit_reached(purpose); | |
511 } | |
512 return alloc_region; | |
513 } | |
514 | |
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
//
// Allocates a humongous object of word_size words as a run of contiguous
// regions. Returns NULL if no contiguous run could be obtained.
HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
  assert(regions_accounted_for(), "Region leakage!");

  // We can't allocate H regions while cleanupComplete is running, since
  // some of the regions we find to be empty might not yet be added to the
  // unclean list. (If we're already at a safepoint, this call is
  // unnecessary, not to mention wrong.)
  if (!SafepointSynchronize::is_at_safepoint())
    wait_for_cleanup_complete();

  // Number of whole regions needed to hold word_size words.
  size_t num_regions =
    round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;

  // Special case if < one region???

  // Remember the ft size.
  // Regions still available for heap expansion.
  size_t x_size = expansion_regions();

  HeapWord* res = NULL;
  bool eliminated_allocated_from_lists = false;

  // Can the allocation potentially fit in the free regions?
  if (free_regions() >= num_regions) {
    res = _hrs->obj_allocate(word_size);
  }
  if (res == NULL) {
    // Try expansion.
    size_t fs = _hrs->free_suffix();
    if (fs + x_size >= num_regions) {
      // Expand by just enough regions to complete the contiguous suffix.
      expand((num_regions - fs) * HeapRegion::GrainBytes);
      res = _hrs->obj_allocate(word_size);
      assert(res != NULL, "This should have worked.");
    } else {
      // Expansion won't help. Are there enough free regions if we get rid
      // of reservations?
      size_t avail = free_regions();
      if (avail >= num_regions) {
        res = _hrs->obj_allocate(word_size);
        if (res != NULL) {
          remove_allocated_regions_from_lists();
          eliminated_allocated_from_lists = true;
        }
      }
    }
  }
  if (res != NULL) {
    // Increment by the number of regions allocated.
    // FIXME: Assumes regions all of size GrainBytes.
#ifndef PRODUCT
    mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
                                           HeapRegion::GrainWords));
#endif
    // Avoid removing twice if the fallback path above already did it.
    if (!eliminated_allocated_from_lists)
      remove_allocated_regions_from_lists();
    _summary_bytes_used += word_size * HeapWordSize;
    _free_regions -= num_regions;
    _num_humongous_regions += (int) num_regions;
  }
  assert(regions_accounted_for(), "Region Leakage");
  return res;
}
579 | |
// Slow-path allocation: called with the Heap_lock held (or at a
// safepoint). Handles the humongous case, retiring/replacing the current
// allocation region, and optionally triggering a collection pause.
// Post condition (asserted below): on success outside a safepoint the
// Heap_lock has been released; on failure it is still held.
HeapWord*
G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                         bool permit_collection_pause) {
  HeapWord* res = NULL;
  HeapRegion* allocated_young_region = NULL;

  assert( SafepointSynchronize::is_at_safepoint() ||
          Heap_lock->owned_by_self(), "pre condition of the call" );

  if (isHumongous(word_size)) {
    // Allocation of a humongous object can, in a sense, complete a
    // partial region, if the previous alloc was also humongous, and
    // caused the test below to succeed.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    res = humongousObjAllocate(word_size);
    assert(_cur_alloc_region == NULL
           || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

  } else {
    // We may have concurrent cleanup working at the time. Wait for it
    // to complete. In the future we would probably want to make the
    // concurrent cleanup truly concurrent by decoupling it from the
    // allocation.
    if (!SafepointSynchronize::is_at_safepoint())
      wait_for_cleanup_complete();
    // If we do a collection pause, this will be reset to a non-NULL
    // value. If we don't, nulling here ensures that we allocate a new
    // region below.
    if (_cur_alloc_region != NULL) {
      // We're finished with the _cur_alloc_region.
      // As we're builing (at least the young portion) of the collection
      // set incrementally we'll add the current allocation region to
      // the collection set here.
      if (_cur_alloc_region->is_young()) {
        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
      }
      _summary_bytes_used += _cur_alloc_region->used();
      _cur_alloc_region = NULL;
    }
    assert(_cur_alloc_region == NULL, "Invariant.");
    // Completion of a heap region is perhaps a good point at which to do
    // a collection pause.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    // Make sure we have an allocation region available.
    if (_cur_alloc_region == NULL) {
      if (!SafepointSynchronize::is_at_safepoint())
        wait_for_cleanup_complete();
      bool next_is_young = should_set_young_locked();
      // If the next region is not young, make sure it's zero-filled.
      _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
      if (_cur_alloc_region != NULL) {
        // The region's already-used bytes are subtracted here and added
        // back when the region is retired (see above / abandon path).
        _summary_bytes_used -= _cur_alloc_region->used();
        if (next_is_young) {
          set_region_short_lived_locked(_cur_alloc_region);
          allocated_young_region = _cur_alloc_region;
        }
      }
    }
    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

    // Now retry the allocation.
    if (_cur_alloc_region != NULL) {
      if (allocated_young_region != NULL) {
        // We need to ensure that the store to top does not
        // float above the setting of the young type.
        OrderAccess::storestore();
      }
      res = _cur_alloc_region->allocate(word_size);
    }
  }

  // NOTE: fails frequently in PRT
  assert(regions_accounted_for(), "Region leakage!");

  if (res != NULL) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      assert( permit_collection_pause, "invariant" );
      assert( Heap_lock->owned_by_self(), "invariant" );
      Heap_lock->unlock();
    }

    if (allocated_young_region != NULL) {
      // Dirty the cards covering the whole new young region so writes
      // into it are not missed by refinement.
      HeapRegion* hr = allocated_young_region;
      HeapWord* bottom = hr->bottom();
      HeapWord* end = hr->end();
      MemRegion mr(bottom, end);
      ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
    }
  }

  assert( SafepointSynchronize::is_at_safepoint() ||
          (res == NULL && Heap_lock->owned_by_self()) ||
          (res != NULL && !Heap_lock->owned_by_self()),
          "post condition of the call" );

  return res;
}
681 | |
682 HeapWord* | |
683 G1CollectedHeap::mem_allocate(size_t word_size, | |
684 bool is_noref, | |
685 bool is_tlab, | |
686 bool* gc_overhead_limit_was_exceeded) { | |
687 debug_only(check_for_valid_allocation_state()); | |
688 assert(no_gc_in_progress(), "Allocation during gc not allowed"); | |
689 HeapWord* result = NULL; | |
690 | |
691 // Loop until the allocation is satisified, | |
692 // or unsatisfied after GC. | |
693 for (int try_count = 1; /* return or throw */; try_count += 1) { | |
694 int gc_count_before; | |
695 { | |
696 Heap_lock->lock(); | |
697 result = attempt_allocation(word_size); | |
698 if (result != NULL) { | |
699 // attempt_allocation should have unlocked the heap lock | |
700 assert(is_in(result), "result not in heap"); | |
701 return result; | |
702 } | |
703 // Read the gc count while the heap lock is held. | |
704 gc_count_before = SharedHeap::heap()->total_collections(); | |
705 Heap_lock->unlock(); | |
706 } | |
707 | |
708 // Create the garbage collection operation... | |
709 VM_G1CollectForAllocation op(word_size, | |
710 gc_count_before); | |
711 | |
712 // ...and get the VM thread to execute it. | |
713 VMThread::execute(&op); | |
714 if (op.prologue_succeeded()) { | |
715 result = op.result(); | |
716 assert(result == NULL || is_in(result), "result not in heap"); | |
717 return result; | |
718 } | |
719 | |
720 // Give a warning if we seem to be looping forever. | |
721 if ((QueuedAllocationWarningCount > 0) && | |
722 (try_count % QueuedAllocationWarningCount == 0)) { | |
723 warning("G1CollectedHeap::mem_allocate_work retries %d times", | |
724 try_count); | |
725 } | |
726 } | |
727 } | |
728 | |
// Retire the current mutator allocation region without replacing it:
// an empty region goes back to the free set; a non-empty one has its
// used bytes accounted and, if young, is added to the incremental
// collection set.
void G1CollectedHeap::abandon_cur_alloc_region() {
  if (_cur_alloc_region != NULL) {
    // We're finished with the _cur_alloc_region.
    if (_cur_alloc_region->is_empty()) {
      _free_regions++;
      free_region(_cur_alloc_region);
    } else {
      // As we're builing (at least the young portion) of the collection
      // set incrementally we'll add the current allocation region to
      // the collection set here.
      if (_cur_alloc_region->is_young()) {
        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
      }
      _summary_bytes_used += _cur_alloc_region->used();
    }
    _cur_alloc_region = NULL;
  }
}
747 | |
// Drop any retained GC allocation regions (called e.g. from
// do_collection before a full GC).
void G1CollectedHeap::abandon_gc_alloc_regions() {
  // first, make sure that the GC alloc region list is empty (it should!)
  assert(_gc_alloc_region_list == NULL, "invariant");
  release_gc_alloc_regions(true /* totally */);
}
753 | |
342 | 754 class PostMCRemSetClearClosure: public HeapRegionClosure { |
755 ModRefBarrierSet* _mr_bs; | |
756 public: | |
757 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
758 bool doHeapRegion(HeapRegion* r) { | |
759 r->reset_gc_time_stamp(); | |
760 if (r->continuesHumongous()) | |
761 return false; | |
762 HeapRegionRemSet* hrrs = r->rem_set(); | |
763 if (hrrs != NULL) hrrs->clear(); | |
764 // You might think here that we could clear just the cards | |
765 // corresponding to the used region. But no: if we leave a dirty card | |
766 // in a region we might allocate into, then it would prevent that card | |
767 // from being enqueued, and cause it to be missed. | |
768 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
769 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
770 return false; | |
771 } | |
772 }; | |
773 | |
774 | |
775 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
776 ModRefBarrierSet* _mr_bs; | |
777 public: | |
778 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
779 bool doHeapRegion(HeapRegion* r) { | |
780 if (r->continuesHumongous()) return false; | |
781 if (r->used_region().word_size() != 0) { | |
782 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
783 } | |
784 return false; | |
785 } | |
786 }; | |
787 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
// Region closure used after a full GC to rebuild remembered sets:
// iterates over the oops in each region with an UpdateRSOopClosure.
class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  UpdateRSOopClosure _cl;
  int _worker_i;              // worker id passed through to the oop closure
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
    _worker_i(worker_i),
    _g1h(g1)
  { }
  bool doHeapRegion(HeapRegion* r) {
    // Continues-humongous regions are covered via their start region.
    if (!r->continuesHumongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false;
  }
};
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
806 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
// Gang task that rebuilds remembered sets in parallel after a full GC:
// each worker claims region chunks (RebuildRSClaimValue) and applies a
// RebuildRSOutOfRegionClosure to them.
class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
public:
  ParRebuildRSTask(G1CollectedHeap* g1)
    : AbstractGangTask("ParRebuildRSTask"),
      _g1(g1)
  { }

  void work(int i) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
                                         HeapRegion::RebuildRSClaimValue);
  }
};
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
821 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
822 void G1CollectedHeap::do_collection(bool explicit_gc, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
823 bool clear_all_soft_refs, |
342 | 824 size_t word_size) { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
825 if (GC_locker::check_active_before_gc()) { |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
826 return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
827 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
828 |
342 | 829 ResourceMark rm; |
830 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
831 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
832 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
833 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
834 |
342 | 835 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
836 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
837 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
838 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
839 collector_policy()->should_clear_all_soft_refs(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
840 |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
841 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
842 |
342 | 843 { |
844 IsGCActiveMark x; | |
845 | |
846 // Timing | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
847 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
848 assert(!system_gc || explicit_gc, "invariant"); |
342 | 849 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
850 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
851 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
852 PrintGC, true, gclog_or_tty); |
342 | 853 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
854 TraceMemoryManagerStats tms(true /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
855 |
342 | 856 double start = os::elapsedTime(); |
857 g1_policy()->record_full_collection_start(); | |
858 | |
859 gc_prologue(true); | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
860 increment_total_collections(true /* full gc */); |
342 | 861 |
862 size_t g1h_prev_used = used(); | |
863 assert(used() == recalculate_used(), "Should be equal"); | |
864 | |
865 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
866 HandleMark hm; // Discard invalid handles created during verification | |
867 prepare_for_verify(); | |
868 gclog_or_tty->print(" VerifyBeforeGC:"); | |
869 Universe::verify(true); | |
870 } | |
871 assert(regions_accounted_for(), "Region leakage!"); | |
872 | |
873 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
874 | |
875 // We want to discover references, but not process them yet. | |
876 // This mode is disabled in | |
877 // instanceRefKlass::process_discovered_references if the | |
878 // generation does some collection work, or | |
879 // instanceRefKlass::enqueue_discovered_references if the | |
880 // generation returns without doing any work. | |
881 ref_processor()->disable_discovery(); | |
882 ref_processor()->abandon_partial_discovery(); | |
883 ref_processor()->verify_no_references_recorded(); | |
884 | |
885 // Abandon current iterations of concurrent marking and concurrent | |
886 // refinement, if any are in progress. | |
887 concurrent_mark()->abort(); | |
888 | |
889 // Make sure we'll choose a new allocation region afterwards. | |
890 abandon_cur_alloc_region(); | |
636 | 891 abandon_gc_alloc_regions(); |
342 | 892 assert(_cur_alloc_region == NULL, "Invariant."); |
893 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); | |
894 tear_down_region_lists(); | |
895 set_used_regions_to_need_zero_fill(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
896 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
897 // We may have added regions to the current incremental collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
898 // set between the last GC or pause and now. We need to clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
899 // incremental collection set and then start rebuilding it afresh |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
900 // after this full GC. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
901 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
902 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
903 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
904 |
342 | 905 if (g1_policy()->in_young_gc_mode()) { |
906 empty_young_list(); | |
907 g1_policy()->set_full_young_gcs(true); | |
908 } | |
909 | |
910 // Temporarily make reference _discovery_ single threaded (non-MT). | |
911 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); | |
912 | |
913 // Temporarily make refs discovery atomic | |
914 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
915 | |
916 // Temporarily clear _is_alive_non_header | |
917 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
918 | |
919 ref_processor()->enable_discovery(); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
920 ref_processor()->setup_policy(do_clear_all_soft_refs); |
342 | 921 |
922 // Do collection work | |
923 { | |
924 HandleMark hm; // Discard invalid handles created during gc | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
925 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); |
342 | 926 } |
927 // Because freeing humongous regions may have added some unclean | |
928 // regions, it is necessary to tear down again before rebuilding. | |
929 tear_down_region_lists(); | |
930 rebuild_region_lists(); | |
931 | |
932 _summary_bytes_used = recalculate_used(); | |
933 | |
934 ref_processor()->enqueue_discovered_references(); | |
935 | |
936 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
937 | |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
938 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
939 |
342 | 940 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
941 HandleMark hm; // Discard invalid handles created during verification | |
942 gclog_or_tty->print(" VerifyAfterGC:"); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
943 prepare_for_verify(); |
342 | 944 Universe::verify(false); |
945 } | |
946 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
947 | |
948 reset_gc_time_stamp(); | |
949 // Since everything potentially moved, we will clear all remembered | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
950 // sets, and clear all cards. Later we will rebuild remebered |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
951 // sets. We will also reset the GC time stamps of the regions. |
342 | 952 PostMCRemSetClearClosure rs_clear(mr_bs()); |
953 heap_region_iterate(&rs_clear); | |
954 | |
955 // Resize the heap if necessary. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
956 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); |
342 | 957 |
958 if (_cg1r->use_cache()) { | |
959 _cg1r->clear_and_record_card_counts(); | |
960 _cg1r->clear_hot_cache(); | |
961 } | |
962 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
963 // Rebuild remembered sets of all regions. |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
964 if (ParallelGCThreads > 0) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
965 ParRebuildRSTask rebuild_rs_task(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
966 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
967 HeapRegion::InitialClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
968 set_par_threads(workers()->total_workers()); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
969 workers()->run_task(&rebuild_rs_task); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
970 set_par_threads(0); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
971 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
972 HeapRegion::RebuildRSClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
973 reset_heap_region_claim_values(); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
974 } else { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
975 RebuildRSOutOfRegionClosure rebuild_rs(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
976 heap_region_iterate(&rebuild_rs); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
977 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
978 |
342 | 979 if (PrintGC) { |
980 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
981 } | |
982 | |
983 if (true) { // FIXME | |
984 // Ask the permanent generation to adjust size for full collections | |
985 perm()->compute_new_size(); | |
986 } | |
987 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
988 // Start a new incremental collection set for the next pause |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
989 assert(g1_policy()->collection_set() == NULL, "must be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
990 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
991 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
992 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
993 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
994 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
995 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
996 |
342 | 997 double end = os::elapsedTime(); |
998 g1_policy()->record_full_collection_end(); | |
999 | |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1000 #ifdef TRACESPINNING |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1001 ParallelTaskTerminator::print_termination_counts(); |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1002 #endif |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1003 |
342 | 1004 gc_epilogue(true); |
1005 | |
794 | 1006 // Discard all rset updates |
1007 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1008 assert(!G1DeferredRSUpdate |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1009 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
342 | 1010 assert(regions_accounted_for(), "Region leakage!"); |
1011 } | |
1012 | |
1013 if (g1_policy()->in_young_gc_mode()) { | |
1014 _young_list->reset_sampled_info(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1015 // At this point there should be no regions in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1016 // entire heap tagged as young. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1017 assert( check_young_list_empty(true /* check_heap */), |
342 | 1018 "young list should be empty at this point"); |
1019 } | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1020 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1021 // Update the number of full collections that have been completed. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1022 increment_full_collections_completed(false /* outer */); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1023 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1024 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1025 Universe::print_heap_after_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1026 } |
342 | 1027 } |
1028 | |
1029 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1030 do_collection(true, /* explicit_gc */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1031 clear_all_soft_refs, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1032 0 /* word_size */); |
342 | 1033 } |
1034 | |
1035 // This code is mostly copied from TenuredGeneration. | |
1036 void | |
1037 G1CollectedHeap:: | |
1038 resize_if_necessary_after_full_collection(size_t word_size) { | |
1039 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1040 | |
1041 // Include the current allocation, if any, and bytes that will be | |
1042 // pre-allocated to support collections, as "used". | |
1043 const size_t used_after_gc = used(); | |
1044 const size_t capacity_after_gc = capacity(); | |
1045 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1046 | |
1047 // We don't have floating point command-line arguments | |
1048 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100; | |
1049 const double maximum_used_percentage = 1.0 - minimum_free_percentage; | |
1050 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; | |
1051 const double minimum_used_percentage = 1.0 - maximum_free_percentage; | |
1052 | |
1053 size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage); | |
1054 size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage); | |
1055 | |
1056 // Don't shrink less than the initial size. | |
1057 minimum_desired_capacity = | |
1058 MAX2(minimum_desired_capacity, | |
1059 collector_policy()->initial_heap_byte_size()); | |
1060 maximum_desired_capacity = | |
1061 MAX2(maximum_desired_capacity, | |
1062 collector_policy()->initial_heap_byte_size()); | |
1063 | |
1064 // We are failing here because minimum_desired_capacity is | |
1065 assert(used_after_gc <= minimum_desired_capacity, "sanity check"); | |
1066 assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check"); | |
1067 | |
1068 if (PrintGC && Verbose) { | |
1069 const double free_percentage = ((double)free_after_gc) / capacity(); | |
1070 gclog_or_tty->print_cr("Computing new size after full GC "); | |
1071 gclog_or_tty->print_cr(" " | |
1072 " minimum_free_percentage: %6.2f", | |
1073 minimum_free_percentage); | |
1074 gclog_or_tty->print_cr(" " | |
1075 " maximum_free_percentage: %6.2f", | |
1076 maximum_free_percentage); | |
1077 gclog_or_tty->print_cr(" " | |
1078 " capacity: %6.1fK" | |
1079 " minimum_desired_capacity: %6.1fK" | |
1080 " maximum_desired_capacity: %6.1fK", | |
1081 capacity() / (double) K, | |
1082 minimum_desired_capacity / (double) K, | |
1083 maximum_desired_capacity / (double) K); | |
1084 gclog_or_tty->print_cr(" " | |
1085 " free_after_gc : %6.1fK" | |
1086 " used_after_gc : %6.1fK", | |
1087 free_after_gc / (double) K, | |
1088 used_after_gc / (double) K); | |
1089 gclog_or_tty->print_cr(" " | |
1090 " free_percentage: %6.2f", | |
1091 free_percentage); | |
1092 } | |
1093 if (capacity() < minimum_desired_capacity) { | |
1094 // Don't expand unless it's significant | |
1095 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
1096 expand(expand_bytes); | |
1097 if (PrintGC && Verbose) { | |
1098 gclog_or_tty->print_cr(" expanding:" | |
1099 " minimum_desired_capacity: %6.1fK" | |
1100 " expand_bytes: %6.1fK", | |
1101 minimum_desired_capacity / (double) K, | |
1102 expand_bytes / (double) K); | |
1103 } | |
1104 | |
1105 // No expansion, now see if we want to shrink | |
1106 } else if (capacity() > maximum_desired_capacity) { | |
1107 // Capacity too large, compute shrinking size | |
1108 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1109 shrink(shrink_bytes); | |
1110 if (PrintGC && Verbose) { | |
1111 gclog_or_tty->print_cr(" " | |
1112 " shrinking:" | |
1113 " initSize: %.1fK" | |
1114 " maximum_desired_capacity: %.1fK", | |
1115 collector_policy()->initial_heap_byte_size() / (double) K, | |
1116 maximum_desired_capacity / (double) K); | |
1117 gclog_or_tty->print_cr(" " | |
1118 " shrink_bytes: %.1fK", | |
1119 shrink_bytes / (double) K); | |
1120 } | |
1121 } | |
1122 } | |
1123 | |
1124 | |
1125 HeapWord* | |
1126 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { | |
1127 HeapWord* result = NULL; | |
1128 | |
1129 // In a G1 heap, we're supposed to keep allocation from failing by | |
1130 // incremental pauses. Therefore, at least for now, we'll favor | |
1131 // expansion over collection. (This might change in the future if we can | |
1132 // do something smarter than full collection to satisfy a failed alloc.) | |
1133 | |
1134 result = expand_and_allocate(word_size); | |
1135 if (result != NULL) { | |
1136 assert(is_in(result), "result not in heap"); | |
1137 return result; | |
1138 } | |
1139 | |
1140 // OK, I guess we have to try collection. | |
1141 | |
1142 do_collection(false, false, word_size); | |
1143 | |
1144 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1145 | |
1146 if (result != NULL) { | |
1147 assert(is_in(result), "result not in heap"); | |
1148 return result; | |
1149 } | |
1150 | |
1151 // Try collecting soft references. | |
1152 do_collection(false, true, word_size); | |
1153 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1154 if (result != NULL) { | |
1155 assert(is_in(result), "result not in heap"); | |
1156 return result; | |
1157 } | |
1158 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1159 assert(!collector_policy()->should_clear_all_soft_refs(), |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1160 "Flag should have been handled and cleared prior to this point"); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1161 |
342 | 1162 // What else? We might try synchronous finalization later. If the total |
1163 // space available is large enough for the allocation, then a more | |
1164 // complete compaction phase than we've tried so far might be | |
1165 // appropriate. | |
1166 return NULL; | |
1167 } | |
1168 | |
1169 // Attempting to expand the heap sufficiently | |
1170 // to support an allocation of the given "word_size". If | |
1171 // successful, perform the allocation and return the address of the | |
1172 // allocated block, or else "NULL". | |
1173 | |
1174 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
1175 size_t expand_bytes = word_size * HeapWordSize; | |
1176 if (expand_bytes < MinHeapDeltaBytes) { | |
1177 expand_bytes = MinHeapDeltaBytes; | |
1178 } | |
1179 expand(expand_bytes); | |
1180 assert(regions_accounted_for(), "Region leakage!"); | |
1181 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); | |
1182 return result; | |
1183 } | |
1184 | |
1185 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { | |
1186 size_t pre_used = 0; | |
1187 size_t cleared_h_regions = 0; | |
1188 size_t freed_regions = 0; | |
1189 UncleanRegionList local_list; | |
1190 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, | |
1191 freed_regions, &local_list); | |
1192 | |
1193 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
1194 &local_list); | |
1195 return pre_used; | |
1196 } | |
1197 | |
1198 void | |
1199 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, | |
1200 size_t& pre_used, | |
1201 size_t& cleared_h, | |
1202 size_t& freed_regions, | |
1203 UncleanRegionList* list, | |
1204 bool par) { | |
1205 assert(!hr->continuesHumongous(), "should have filtered these out"); | |
1206 size_t res = 0; | |
677 | 1207 if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && |
1208 !hr->is_young()) { | |
1209 if (G1PolicyVerbose > 0) | |
1210 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" | |
1211 " during cleanup", hr, hr->used()); | |
1212 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); | |
342 | 1213 } |
1214 } | |
1215 | |
1216 // FIXME: both this and shrink could probably be more efficient by | |
1217 // doing one "VirtualSpace::expand_by" call rather than several. | |
1218 void G1CollectedHeap::expand(size_t expand_bytes) { | |
1219 size_t old_mem_size = _g1_storage.committed_size(); | |
1220 // We expand by a minimum of 1K. | |
1221 expand_bytes = MAX2(expand_bytes, (size_t)K); | |
1222 size_t aligned_expand_bytes = | |
1223 ReservedSpace::page_align_size_up(expand_bytes); | |
1224 aligned_expand_bytes = align_size_up(aligned_expand_bytes, | |
1225 HeapRegion::GrainBytes); | |
1226 expand_bytes = aligned_expand_bytes; | |
1227 while (expand_bytes > 0) { | |
1228 HeapWord* base = (HeapWord*)_g1_storage.high(); | |
1229 // Commit more storage. | |
1230 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); | |
1231 if (!successful) { | |
1232 expand_bytes = 0; | |
1233 } else { | |
1234 expand_bytes -= HeapRegion::GrainBytes; | |
1235 // Expand the committed region. | |
1236 HeapWord* high = (HeapWord*) _g1_storage.high(); | |
1237 _g1_committed.set_end(high); | |
1238 // Create a new HeapRegion. | |
1239 MemRegion mr(base, high); | |
1240 bool is_zeroed = !_g1_max_committed.contains(base); | |
1241 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); | |
1242 | |
1243 // Now update max_committed if necessary. | |
1244 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); | |
1245 | |
1246 // Add it to the HeapRegionSeq. | |
1247 _hrs->insert(hr); | |
1248 // Set the zero-fill state, according to whether it's already | |
1249 // zeroed. | |
1250 { | |
1251 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
1252 if (is_zeroed) { | |
1253 hr->set_zero_fill_complete(); | |
1254 put_free_region_on_list_locked(hr); | |
1255 } else { | |
1256 hr->set_zero_fill_needed(); | |
1257 put_region_on_unclean_list_locked(hr); | |
1258 } | |
1259 } | |
1260 _free_regions++; | |
1261 // And we used up an expansion region to create it. | |
1262 _expansion_regions--; | |
1263 // Tell the cardtable about it. | |
1264 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1265 // And the offset table as well. | |
1266 _bot_shared->resize(_g1_committed.word_size()); | |
1267 } | |
1268 } | |
1269 if (Verbose && PrintGC) { | |
1270 size_t new_mem_size = _g1_storage.committed_size(); | |
1271 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", | |
1272 old_mem_size/K, aligned_expand_bytes/K, | |
1273 new_mem_size/K); | |
1274 } | |
1275 } | |
1276 | |
1277 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) | |
1278 { | |
1279 size_t old_mem_size = _g1_storage.committed_size(); | |
1280 size_t aligned_shrink_bytes = | |
1281 ReservedSpace::page_align_size_down(shrink_bytes); | |
1282 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1283 HeapRegion::GrainBytes); | |
1284 size_t num_regions_deleted = 0; | |
1285 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); | |
1286 | |
1287 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1288 if (mr.byte_size() > 0) | |
1289 _g1_storage.shrink_by(mr.byte_size()); | |
1290 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1291 | |
1292 _g1_committed.set_end(mr.start()); | |
1293 _free_regions -= num_regions_deleted; | |
1294 _expansion_regions += num_regions_deleted; | |
1295 | |
1296 // Tell the cardtable about it. | |
1297 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1298 | |
1299 // And the offset table as well. | |
1300 _bot_shared->resize(_g1_committed.word_size()); | |
1301 | |
1302 HeapRegionRemSet::shrink_heap(n_regions()); | |
1303 | |
1304 if (Verbose && PrintGC) { | |
1305 size_t new_mem_size = _g1_storage.committed_size(); | |
1306 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1307 old_mem_size/K, aligned_shrink_bytes/K, | |
1308 new_mem_size/K); | |
1309 } | |
1310 } | |
1311 | |
1312 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
636 | 1313 release_gc_alloc_regions(true /* totally */); |
342 | 1314 tear_down_region_lists(); // We will rebuild them in a moment. |
1315 shrink_helper(shrink_bytes); | |
1316 rebuild_region_lists(); | |
1317 } | |
1318 | |
1319 // Public methods. | |
1320 | |
1321 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1322 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1323 #endif // _MSC_VER | |
1324 | |
1325 | |
1326 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1327 SharedHeap(policy_), | |
1328 _g1_policy(policy_), | |
1111 | 1329 _dirty_card_queue_set(false), |
1705 | 1330 _into_cset_dirty_card_queue_set(false), |
342 | 1331 _ref_processor(NULL), |
1332 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1333 _bot_shared(NULL), | |
1334 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), | |
1335 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1336 _evac_failure_scan_stack(NULL) , | |
1337 _mark_in_progress(false), | |
1338 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), | |
1339 _cur_alloc_region(NULL), | |
1340 _refine_cte_cl(NULL), | |
1341 _free_region_list(NULL), _free_region_list_size(0), | |
1342 _free_regions(0), | |
1343 _full_collection(false), | |
1344 _unclean_region_list(), | |
1345 _unclean_regions_coming(false), | |
1346 _young_list(new YoungList(this)), | |
1347 _gc_time_stamp(0), | |
526 | 1348 _surviving_young_words(NULL), |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1349 _full_collections_completed(0), |
526 | 1350 _in_cset_fast_test(NULL), |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1351 _in_cset_fast_test_base(NULL), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1352 _dirty_cards_region_list(NULL) { |
342 | 1353 _g1h = this; // To catch bugs. |
1354 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1355 vm_exit_during_initialization("Failed necessary allocation."); | |
1356 } | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1357 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1358 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1359 |
342 | 1360 int n_queues = MAX2((int)ParallelGCThreads, 1); |
1361 _task_queues = new RefToScanQueueSet(n_queues); | |
1362 | |
1363 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1364 assert(n_rem_sets > 0, "Invariant."); | |
1365 | |
1366 HeapRegionRemSetIterator** iter_arr = | |
1367 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1368 for (int i = 0; i < n_queues; i++) { | |
1369 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1370 } | |
1371 _rem_set_iterator = iter_arr; | |
1372 | |
1373 for (int i = 0; i < n_queues; i++) { | |
1374 RefToScanQueue* q = new RefToScanQueue(); | |
1375 q->initialize(); | |
1376 _task_queues->register_queue(i, q); | |
1377 } | |
1378 | |
1379 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
636 | 1380 _gc_alloc_regions[ap] = NULL; |
1381 _gc_alloc_region_counts[ap] = 0; | |
1382 _retained_gc_alloc_regions[ap] = NULL; | |
1383 // by default, we do not retain a GC alloc region for each ap; | |
1384 // we'll override this, when appropriate, below | |
1385 _retain_gc_alloc_region[ap] = false; | |
1386 } | |
1387 | |
1388 // We will try to remember the last half-full tenured region we | |
1389 // allocated to at the end of a collection so that we can re-use it | |
1390 // during the next collection. | |
1391 _retain_gc_alloc_region[GCAllocForTenured] = true; | |
1392 | |
342 | 1393 guarantee(_task_queues != NULL, "task_queues allocation failure."); |
1394 } | |
1395 | |
1396 jint G1CollectedHeap::initialize() { | |
1166 | 1397 CollectedHeap::pre_initialize(); |
342 | 1398 os::enable_vtime(); |
1399 | |
1400 // Necessary to satisfy locking discipline assertions. | |
1401 | |
1402 MutexLocker x(Heap_lock); | |
1403 | |
1404 // While there are no constraints in the GC code that HeapWordSize | |
1405 // be any particular value, there are multiple other areas in the | |
1406 // system which believe this to be true (e.g. oop->object_size in some | |
1407 // cases incorrectly returns the size in wordSize units rather than | |
1408 // HeapWordSize). | |
1409 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1410 | |
1411 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1412 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1413 | |
1414 // Ensure that the sizes are properly aligned. | |
1415 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1416 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1417 | |
1418 _cg1r = new ConcurrentG1Refine(); | |
1419 | |
1420 // Reserve the maximum. | |
1421 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1422 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1423 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1424 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1425 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1426 |
342 | 1427 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1428 HeapRegion::GrainBytes, | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1429 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1430 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1431 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1432 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1433 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1434 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1435 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1436 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1437 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1438 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1439 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1440 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1441 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1442 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1443 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1444 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1445 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1446 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1447 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1448 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1449 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1450 } |
342 | 1451 |
1452 if (!heap_rs.is_reserved()) { | |
1453 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1454 return JNI_ENOMEM; | |
1455 } | |
1456 | |
1457 // It is important to do this in a way such that concurrent readers can't | |
1458 // temporarily think somethings in the heap. (I've actually seen this | |
1459 // happen in asserts: DLD.) | |
1460 _reserved.set_word_size(0); | |
1461 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1462 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1463 | |
1464 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1465 | |
1466 _num_humongous_regions = 0; | |
1467 | |
1468 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1469 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1470 set_barrier_set(rem_set()->bs()); | |
1471 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1472 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1473 } else { | |
1474 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1475 return JNI_ENOMEM; | |
1476 } | |
1477 | |
1478 // Also create a G1 rem set. | |
1479 if (G1UseHRIntoRS) { | |
1480 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { | |
1481 _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
1482 } else { | |
1483 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); | |
1484 return JNI_ENOMEM; | |
1485 } | |
1486 } else { | |
1487 _g1_rem_set = new StupidG1RemSet(this); | |
1488 } | |
1489 | |
1490 // Carve out the G1 part of the heap. | |
1491 | |
1492 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1493 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1494 g1_rs.size()/HeapWordSize); | |
1495 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1496 | |
1497 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1498 | |
1499 _g1_storage.initialize(g1_rs, 0); | |
1500 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1501 _g1_max_committed = _g1_committed; | |
393 | 1502 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1503 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1504 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
1505 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1506 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1507 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1508 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1509 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1510 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1511 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1512 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1513 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1514 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1515 |
342 | 1516 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1517 heap_word_size(init_byte_size)); | |
1518 | |
1519 _g1h = this; | |
1520 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1521 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1522 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1523 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1524 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1525 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1526 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1527 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1528 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1529 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1530 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1531 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1532 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1533 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1534 |
342 | 1535 // Create the ConcurrentMark data structure and thread. |
1536 // (Must do this late, so that "max_regions" is defined.) | |
1537 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1538 _cmThread = _cm->cmThread(); | |
1539 | |
1540 // ...and the concurrent zero-fill thread, if necessary. | |
1541 if (G1ConcZeroFill) { | |
1542 _czft = new ConcurrentZFThread(); | |
1543 } | |
1544 | |
1545 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1546 HeapRegionRemSet::init_heap(max_regions()); | |
1547 | |
677 | 1548 // Now expand into the initial heap size. |
1549 expand(init_byte_size); | |
342 | 1550 |
1551 // Perform any initialization actions delegated to the policy. | |
1552 g1_policy()->init(); | |
1553 | |
1554 g1_policy()->note_start_of_mark_thread(); | |
1555 | |
1556 _refine_cte_cl = | |
1557 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1558 g1_rem_set(), | |
1559 concurrent_g1_refine()); | |
1560 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1561 | |
1562 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1563 SATB_Q_FL_lock, | |
1111 | 1564 G1SATBProcessCompletedThreshold, |
342 | 1565 Shared_SATB_Q_lock); |
794 | 1566 |
1567 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1568 DirtyCardQ_FL_lock, | |
1111 | 1569 concurrent_g1_refine()->yellow_zone(), |
1570 concurrent_g1_refine()->red_zone(), | |
794 | 1571 Shared_DirtyCardQ_lock); |
1572 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1573 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1574 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1575 DirtyCardQ_FL_lock, |
1111 | 1576 -1, // never trigger processing |
1577 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1578 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1579 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1580 } |
1705 | 1581 |
1582 // Initialize the card queue set used to hold cards containing | |
1583 // references into the collection set. | |
1584 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, | |
1585 DirtyCardQ_FL_lock, | |
1586 -1, // never trigger processing | |
1587 -1, // no limit on length | |
1588 Shared_DirtyCardQ_lock, | |
1589 &JavaThread::dirty_card_queue_set()); | |
1590 | |
342 | 1591 // In case we're keeping closure specialization stats, initialize those |
1592 // counts and that mechanism. | |
1593 SpecializationStats::clear(); | |
1594 | |
1595 _gc_alloc_region_list = NULL; | |
1596 | |
1597 // Do later initialization work for concurrent refinement. | |
1598 _cg1r->init(); | |
1599 | |
1600 return JNI_OK; | |
1601 } | |
1602 | |
// Set up the heap's ReferenceProcessor, which handles discovery and
// processing of soft/weak/final/phantom references. Discovery spans the
// whole reserved region and is multi-threaded.
void G1CollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  MemRegion mr = reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
                                         mr,    // span
                                         false, // Reference discovery is not atomic
                                                // (though it shouldn't matter here.)
                                         true,  // mt_discovery
                                         NULL,  // is alive closure: need to fill this in for efficiency
                                         ParallelGCThreads,
                                         ParallelRefProcEnabled,
                                         true); // Setting next fields of discovered
                                                // lists requires a barrier.
}
1617 | |
// Returns the number of bytes currently committed for the G1 part of the heap.
size_t G1CollectedHeap::capacity() const {
  return _g1_committed.byte_size();
}
1621 | |
// Drains the hot card cache and all completed dirty-card buffers, applying
// "cl" to each card. Cards found to contain references into the collection
// set are recorded in "into_cset_dcq" (so the RSets of collection-set
// regions can be recreated on evacuation failure). "worker_i" identifies
// the GC worker for per-worker timing stats.
// NOTE(review): the "concurrent" parameter is unused in this visible body —
// presumably reserved for callers; confirm against the declaration.
void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                 DirtyCardQueue* into_cset_dcq,
                                                 bool concurrent,
                                                 int worker_i) {
  // Clean cards in the hot card cache
  concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;
  // Repeatedly claim and process completed buffers until none remain.
  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->record_update_rs_processed_buffers(worker_i,
                                                  (double) n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
1639 | |
1640 | |
// Computes the sum of the storage used by the various regions.

// Returns the cached used-bytes total plus whatever has been allocated in
// the current allocation region. Requires the Heap_lock to be held on this
// thread's behalf (see used_unlocked() for the lock-free variant).
size_t G1CollectedHeap::used() const {
  assert(Heap_lock->owner() != NULL,
         "Should be owned on this thread's behalf.");
  size_t result = _summary_bytes_used;
  // Read only once in case it is set to NULL concurrently
  HeapRegion* hr = _cur_alloc_region;
  if (hr != NULL)
    result += hr->used();
  return result;
}
1653 | |
// Lock-free variant of used(): returns only the cached total, ignoring the
// current allocation region, so it may slightly under-report.
size_t G1CollectedHeap::used_unlocked() const {
  size_t result = _summary_bytes_used;
  return result;
}
342 | 1659 class SumUsedClosure: public HeapRegionClosure { |
1660 size_t _used; | |
1661 public: | |
1662 SumUsedClosure() : _used(0) {} | |
1663 bool doHeapRegion(HeapRegion* r) { | |
1664 if (!r->continuesHumongous()) { | |
1665 _used += r->used(); | |
1666 } | |
1667 return false; | |
1668 } | |
1669 size_t result() { return _used; } | |
1670 }; | |
1671 | |
// Recomputes the used-byte total from scratch by walking every region
// (unlike used(), which relies on the cached _summary_bytes_used).
size_t G1CollectedHeap::recalculate_used() const {
  SumUsedClosure blk;
  _hrs->iterate(&blk);
  return blk.result();
}
1677 | |
1678 #ifndef PRODUCT | |
1679 class SumUsedRegionsClosure: public HeapRegionClosure { | |
1680 size_t _num; | |
1681 public: | |
677 | 1682 SumUsedRegionsClosure() : _num(0) {} |
342 | 1683 bool doHeapRegion(HeapRegion* r) { |
1684 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
1685 _num += 1; | |
1686 } | |
1687 return false; | |
1688 } | |
1689 size_t result() { return _num; } | |
1690 }; | |
1691 | |
1692 size_t G1CollectedHeap::recalculate_used_regions() const { | |
1693 SumUsedRegionsClosure blk; | |
1694 _hrs->iterate(&blk); | |
1695 return blk.result(); | |
1696 } | |
1697 #endif // PRODUCT | |
1698 | |
// Returns an estimate of the largest allocation that could currently
// succeed without a collection. Called without any locks, so the answer
// is inherently approximate ("unsafe").
size_t G1CollectedHeap::unsafe_max_alloc() {
  if (_free_regions > 0) return HeapRegion::GrainBytes;
  // otherwise, is there space in the current allocation region?

  // We need to store the current allocation region in a local variable
  // here. The problem is that this method doesn't take any locks and
  // there may be other threads which overwrite the current allocation
  // region field. attempt_allocation(), for example, sets it to NULL
  // and this can happen *after* the NULL check here but before the call
  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  // to be a problem in the optimized build, since the two loads of the
  // current allocation region field are optimized away.
  HeapRegion* car = _cur_alloc_region;

  // FIXME: should iterate over all regions?
  if (car == NULL) {
    return 0;
  }
  return car->free();
}
1719 | |
// Decides whether an explicit/locker-induced GC should be satisfied by an
// initial-mark pause that starts a concurrent cycle (rather than a
// stop-the-world Full GC), based on the user-settable flags.
bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return
    ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
// Bumps the count of completed full collections and wakes up any thread
// blocked waiting for one (e.g. in VM_G1IncCollectionPause::doit_epilogue()).
// "outer" is true when called at the end of a concurrent cycle, false when
// called at the end of a (possibly nested) Full GC.
void G1CollectedHeap::increment_full_collections_completed(bool outer) {
  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);

  // We have already incremented _total_full_collections at the start
  // of the GC, so total_full_collections() represents how many full
  // collections have been started.
  unsigned int full_collections_started = total_full_collections();

  // Given that this method is called at the end of a Full GC or of a
  // concurrent cycle, and those can be nested (i.e., a Full GC can
  // interrupt a concurrent cycle), the number of full collections
  // completed should be either one (in the case where there was no
  // nesting) or two (when a Full GC interrupted a concurrent cycle)
  // behind the number of full collections started.

  // This is the case for the inner caller, i.e. a Full GC.
  assert(outer ||
         (full_collections_started == _full_collections_completed + 1) ||
         (full_collections_started == _full_collections_completed + 2),
         err_msg("for inner caller: full_collections_started = %u "
                 "is inconsistent with _full_collections_completed = %u",
                 full_collections_started, _full_collections_completed));

  // This is the case for the outer caller, i.e. the concurrent cycle.
  assert(!outer ||
         (full_collections_started == _full_collections_completed + 1),
         err_msg("for outer caller: full_collections_started = %u "
                 "is inconsistent with _full_collections_completed = %u",
                 full_collections_started, _full_collections_completed));

  _full_collections_completed += 1;

  // This notify_all() will ensure that a thread that called
  // System.gc() with (with ExplicitGCInvokesConcurrent set or not)
  // and it's waiting for a full GC to finish will be woken up. It is
  // waiting in VM_G1IncCollectionPause::doit_epilogue().
  FullGCCount_lock->notify_all();
}
// Performs a collection on behalf of the VM thread, which must already
// hold the Heap_lock. Only heap inspection/dump causes are expected here;
// both are handled with a full collection that keeps soft references.
void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);   // records "cause" for the duration
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false); // don't clear all soft refs
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}
1780 | |
// External entry point for triggering a collection (e.g. System.gc()).
// Reads the GC counts under the Heap_lock, then schedules the appropriate
// VM operation: an initial-mark pause (starting a concurrent cycle), a
// standard evacuation pause, or a stop-the-world Full GC.
void G1CollectedHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;
  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before = SharedHeap::heap()->total_collections();
    full_gc_count_before = SharedHeap::heap()->total_full_collections();

    // Don't want to do a GC until cleanup is completed.
    wait_for_cleanup_complete();

    // We give up heap lock; VMThread::execute gets it back below
  }

  if (should_do_concurrent_full_gc(cause)) {
    // Schedule an initial-mark evacuation pause that will start a
    // concurrent cycle.
    VM_G1IncCollectionPause op(gc_count_before,
                               true, /* should_initiate_conc_mark */
                               g1_policy()->max_pause_time_ms(),
                               cause);
    VMThread::execute(&op);
  } else {
    if (cause == GCCause::_gc_locker
        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {

      // Schedule a standard evacuation pause.
      VM_G1IncCollectionPause op(gc_count_before,
                                 false, /* should_initiate_conc_mark */
                                 g1_policy()->max_pause_time_ms(),
                                 cause);
      VMThread::execute(&op);
    } else {
      // Schedule a Full GC.
      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
      VMThread::execute(&op);
    }
  }
}
1824 | |
1825 bool G1CollectedHeap::is_in(const void* p) const { | |
1826 if (_g1_committed.contains(p)) { | |
1827 HeapRegion* hr = _hrs->addr_to_region(p); | |
1828 return hr->is_in(p); | |
1829 } else { | |
1830 return _perm_gen->as_gen()->is_in(p); | |
1831 } | |
1832 } | |
1833 | |
1834 // Iteration functions. | |
1835 | |
1836 // Iterates an OopClosure over all ref-containing fields of objects | |
1837 // within a HeapRegion. | |
1838 | |
1839 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
1840 MemRegion _mr; | |
1841 OopClosure* _cl; | |
1842 public: | |
1843 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
1844 : _mr(mr), _cl(cl) {} | |
1845 bool doHeapRegion(HeapRegion* r) { | |
1846 if (! r->continuesHumongous()) { | |
1847 r->oop_iterate(_cl); | |
1848 } | |
1849 return false; | |
1850 } | |
1851 }; | |
1852 | |
// Applies "cl" to every reference field of every object in the committed
// G1 space, and optionally in the perm gen as well.
void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
  IterateOopClosureRegionClosure blk(_g1_committed, cl);
  _hrs->iterate(&blk);
  if (do_perm) {
    perm_gen()->oop_iterate(cl);
  }
}
1860 | |
// As oop_iterate(cl, do_perm), but restricted to the memory region "mr".
void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
  IterateOopClosureRegionClosure blk(mr, cl);
  _hrs->iterate(&blk);
  if (do_perm) {
    perm_gen()->oop_iterate(cl);
  }
}
1868 | |
1869 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
1870 | |
1871 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
1872 ObjectClosure* _cl; | |
1873 public: | |
1874 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
1875 bool doHeapRegion(HeapRegion* r) { | |
1876 if (! r->continuesHumongous()) { | |
1877 r->object_iterate(_cl); | |
1878 } | |
1879 return false; | |
1880 } | |
1881 }; | |
1882 | |
// Applies "cl" to every object in the heap, and optionally in the perm
// gen as well.
void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
  IterateObjectClosureRegionClosure blk(cl);
  _hrs->iterate(&blk);
  if (do_perm) {
    perm_gen()->object_iterate(cl);
  }
}
1890 | |
// Deliberately unsupported: G1 does not track "allocated since last GC"
// in a way this interface can use, so any call is a hard failure.
void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  // FIXME: is this right?
  guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
}
1895 | |
// Calls a SpaceClosure on a HeapRegion.

class SpaceClosureRegionClosure: public HeapRegionClosure {
  SpaceClosure* _cl;   // applied to every region, viewed as a Space
public:
  SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    _cl->do_space(r);
    return false;   // never abort the iteration early
  }
};
1907 | |
// Applies "cl" to every region, presenting each region as a Space.
void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  SpaceClosureRegionClosure blk(cl);
  _hrs->iterate(&blk);
}

// Applies "cl" to every region in the heap region sequence.
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  _hrs->iterate(cl);
}

// Applies "cl" to regions starting at region "r" (iteration order is
// defined by HeapRegionSeq::iterate_from — presumably wraps around;
// confirm in heapRegionSeq.cpp).
void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
                                               HeapRegionClosure* cl) {
  _hrs->iterate_from(r, cl);
}

// As above, but starting from the region with index "idx".
void
G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  _hrs->iterate_from(idx, cl);
}

// Returns the region at index "idx" in the heap region sequence.
HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
1928 | |
1929 void | |
1930 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
1931 int worker, | |
1932 jint claim_value) { | |
355 | 1933 const size_t regions = n_regions(); |
1934 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); | |
1935 // try to spread out the starting points of the workers | |
1936 const size_t start_index = regions / worker_num * (size_t) worker; | |
1937 | |
1938 // each worker will actually look at all regions | |
1939 for (size_t count = 0; count < regions; ++count) { | |
1940 const size_t index = (start_index + count) % regions; | |
1941 assert(0 <= index && index < regions, "sanity"); | |
1942 HeapRegion* r = region_at(index); | |
1943 // we'll ignore "continues humongous" regions (we'll process them | |
1944 // when we come across their corresponding "start humongous" | |
1945 // region) and regions already claimed | |
1946 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
1947 continue; | |
1948 } | |
1949 // OK, try to claim it | |
342 | 1950 if (r->claimHeapRegion(claim_value)) { |
355 | 1951 // success! |
1952 assert(!r->continuesHumongous(), "sanity"); | |
1953 if (r->startsHumongous()) { | |
1954 // If the region is "starts humongous" we'll iterate over its | |
1955 // "continues humongous" first; in fact we'll do them | |
1956 // first. The order is important. In on case, calling the | |
1957 // closure on the "starts humongous" region might de-allocate | |
1958 // and clear all its "continues humongous" regions and, as a | |
1959 // result, we might end up processing them twice. So, we'll do | |
1960 // them first (notice: most closures will ignore them anyway) and | |
1961 // then we'll do the "starts humongous" region. | |
1962 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
1963 HeapRegion* chr = region_at(ch_index); | |
1964 | |
1965 // if the region has already been claimed or it's not | |
1966 // "continues humongous" we're done | |
1967 if (chr->claim_value() == claim_value || | |
1968 !chr->continuesHumongous()) { | |
1969 break; | |
1970 } | |
1971 | |
1972 // Noone should have claimed it directly. We can given | |
1973 // that we claimed its "starts humongous" region. | |
1974 assert(chr->claim_value() != claim_value, "sanity"); | |
1975 assert(chr->humongous_start_region() == r, "sanity"); | |
1976 | |
1977 if (chr->claimHeapRegion(claim_value)) { | |
1978 // we should always be able to claim it; noone else should | |
1979 // be trying to claim this region | |
1980 | |
1981 bool res2 = cl->doHeapRegion(chr); | |
1982 assert(!res2, "Should not abort"); | |
1983 | |
1984 // Right now, this holds (i.e., no closure that actually | |
1985 // does something with "continues humongous" regions | |
1986 // clears them). We might have to weaken it in the future, | |
1987 // but let's leave these two asserts here for extra safety. | |
1988 assert(chr->continuesHumongous(), "should still be the case"); | |
1989 assert(chr->humongous_start_region() == r, "sanity"); | |
1990 } else { | |
1991 guarantee(false, "we should not reach here"); | |
1992 } | |
1993 } | |
1994 } | |
1995 | |
1996 assert(!r->continuesHumongous(), "sanity"); | |
1997 bool res = cl->doHeapRegion(r); | |
1998 assert(!res, "Should not abort"); | |
1999 } | |
2000 } | |
2001 } | |
2002 | |
390 | 2003 class ResetClaimValuesClosure: public HeapRegionClosure { |
2004 public: | |
2005 bool doHeapRegion(HeapRegion* r) { | |
2006 r->set_claim_value(HeapRegion::InitialClaimValue); | |
2007 return false; | |
2008 } | |
2009 }; | |
2010 | |
2011 void | |
2012 G1CollectedHeap::reset_heap_region_claim_values() { | |
2013 ResetClaimValuesClosure blk; | |
2014 heap_region_iterate(&blk); | |
2015 } | |
2016 | |
355 | 2017 #ifdef ASSERT |
2018 // This checks whether all regions in the heap have the correct claim | |
2019 // value. I also piggy-backed on this a check to ensure that the | |
2020 // humongous_start_region() information on "continues humongous" | |
2021 // regions is correct. | |
2022 | |
2023 class CheckClaimValuesClosure : public HeapRegionClosure { | |
2024 private: | |
2025 jint _claim_value; | |
2026 size_t _failures; | |
2027 HeapRegion* _sh_region; | |
2028 public: | |
2029 CheckClaimValuesClosure(jint claim_value) : | |
2030 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } | |
2031 bool doHeapRegion(HeapRegion* r) { | |
2032 if (r->claim_value() != _claim_value) { | |
2033 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
2034 "claim value = %d, should be %d", | |
2035 r->bottom(), r->end(), r->claim_value(), | |
2036 _claim_value); | |
2037 ++_failures; | |
2038 } | |
2039 if (!r->isHumongous()) { | |
2040 _sh_region = NULL; | |
2041 } else if (r->startsHumongous()) { | |
2042 _sh_region = r; | |
2043 } else if (r->continuesHumongous()) { | |
2044 if (r->humongous_start_region() != _sh_region) { | |
2045 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
2046 "HS = "PTR_FORMAT", should be "PTR_FORMAT, | |
2047 r->bottom(), r->end(), | |
2048 r->humongous_start_region(), | |
2049 _sh_region); | |
2050 ++_failures; | |
342 | 2051 } |
2052 } | |
355 | 2053 return false; |
2054 } | |
2055 size_t failures() { | |
2056 return _failures; | |
2057 } | |
2058 }; | |
2059 | |
2060 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
2061 CheckClaimValuesClosure cl(claim_value); | |
2062 heap_region_iterate(&cl); | |
2063 return cl.failures() == 0; | |
2064 } | |
2065 #endif // ASSERT | |
342 | 2066 |
2067 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2068 HeapRegion* r = g1_policy()->collection_set(); | |
2069 while (r != NULL) { | |
2070 HeapRegion* next = r->next_in_collection_set(); | |
2071 if (cl->doHeapRegion(r)) { | |
2072 cl->incomplete(); | |
2073 return; | |
2074 } | |
2075 r = next; | |
2076 } | |
2077 } | |
2078 | |
2079 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
2080 HeapRegionClosure *cl) { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2081 if (r == NULL) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2082 // The CSet is empty so there's nothing to do. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2083 return; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2084 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2085 |
342 | 2086 assert(r->in_collection_set(), |
2087 "Start region must be a member of the collection set."); | |
2088 HeapRegion* cur = r; | |
2089 while (cur != NULL) { | |
2090 HeapRegion* next = cur->next_in_collection_set(); | |
2091 if (cl->doHeapRegion(cur) && false) { | |
2092 cl->incomplete(); | |
2093 return; | |
2094 } | |
2095 cur = next; | |
2096 } | |
2097 cur = g1_policy()->collection_set(); | |
2098 while (cur != r) { | |
2099 HeapRegion* next = cur->next_in_collection_set(); | |
2100 if (cl->doHeapRegion(cur) && false) { | |
2101 cl->incomplete(); | |
2102 return; | |
2103 } | |
2104 cur = next; | |
2105 } | |
2106 } | |
2107 | |
2108 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
2109 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
2110 } | |
2111 | |
2112 | |
2113 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2114 Space* res = heap_region_containing(addr); | |
2115 if (res == NULL) | |
2116 res = perm_gen()->space_containing(addr); | |
2117 return res; | |
2118 } | |
2119 | |
2120 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2121 Space* sp = space_containing(addr); | |
2122 if (sp != NULL) { | |
2123 return sp->block_start(addr); | |
2124 } | |
2125 return NULL; | |
2126 } | |
2127 | |
2128 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
2129 Space* sp = space_containing(addr); | |
2130 assert(sp != NULL, "block_size of address outside of heap"); | |
2131 return sp->block_size(addr); | |
2132 } | |
2133 | |
2134 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2135 Space* sp = space_containing(addr); | |
2136 return sp->block_is_obj(addr); | |
2137 } | |
2138 | |
2139 bool G1CollectedHeap::supports_tlab_allocation() const { | |
2140 return true; | |
2141 } | |
2142 | |
2143 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
2144 return HeapRegion::GrainBytes; | |
2145 } | |
2146 | |
2147 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
2148 // Return the remaining space in the cur alloc region, but not less than | |
2149 // the min TLAB size. | |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2150 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2151 // Also, this value can be at most the humongous object threshold, |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2152 // since we can't allow tlabs to grow big enough to accomodate |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2153 // humongous objects. |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2154 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2155 // We need to store the cur alloc region locally, since it might change |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2156 // between when we test for NULL and when we use it later. |
342 | 2157 ContiguousSpace* cur_alloc_space = _cur_alloc_region; |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2158 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2159 |
342 | 2160 if (cur_alloc_space == NULL) { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2161 return max_tlab_size; |
342 | 2162 } else { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2163 return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize), |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2164 max_tlab_size); |
342 | 2165 } |
2166 } | |
2167 | |
2168 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { | |
2169 bool dummy; | |
2170 return G1CollectedHeap::mem_allocate(size, false, true, &dummy); | |
2171 } | |
2172 | |
2173 bool G1CollectedHeap::allocs_are_zero_filled() { | |
2174 return false; | |
2175 } | |
2176 | |
2177 size_t G1CollectedHeap::large_typearray_limit() { | |
2178 // FIXME | |
2179 return HeapRegion::GrainBytes/HeapWordSize; | |
2180 } | |
2181 | |
2182 size_t G1CollectedHeap::max_capacity() const { | |
1092
ed52bcc32739
6880903: G1: G1 reports incorrect Runtime.maxMemory()
tonyp
parents:
1089
diff
changeset
|
2183 return g1_reserved_obj_bytes(); |
342 | 2184 } |
2185 | |
2186 jlong G1CollectedHeap::millis_since_last_gc() { | |
2187 // assert(false, "NYI"); | |
2188 return 0; | |
2189 } | |
2190 | |
2191 | |
2192 void G1CollectedHeap::prepare_for_verify() { | |
2193 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2194 ensure_parsability(false); | |
2195 } | |
2196 g1_rem_set()->prepare_for_verify(); | |
2197 } | |
2198 | |
2199 class VerifyLivenessOopClosure: public OopClosure { | |
2200 G1CollectedHeap* g1h; | |
2201 public: | |
2202 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
2203 g1h = _g1h; | |
2204 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2205 void do_oop(narrowOop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2206 void do_oop( oop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2207 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2208 template <class T> void do_oop_work(T *p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2209 oop obj = oopDesc::load_decode_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2210 guarantee(obj == NULL || !g1h->is_obj_dead(obj), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2211 "Dead object referenced by a not dead object"); |
342 | 2212 } |
2213 }; | |
2214 | |
2215 class VerifyObjsInRegionClosure: public ObjectClosure { | |
811 | 2216 private: |
342 | 2217 G1CollectedHeap* _g1h; |
2218 size_t _live_bytes; | |
2219 HeapRegion *_hr; | |
811 | 2220 bool _use_prev_marking; |
342 | 2221 public: |
811 | 2222 // use_prev_marking == true -> use "prev" marking information, |
2223 // use_prev_marking == false -> use "next" marking information | |
2224 VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) | |
2225 : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { | |
342 | 2226 _g1h = G1CollectedHeap::heap(); |
2227 } | |
2228 void do_object(oop o) { | |
2229 VerifyLivenessOopClosure isLive(_g1h); | |
2230 assert(o != NULL, "Huh?"); | |
811 | 2231 if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { |
342 | 2232 o->oop_iterate(&isLive); |
1389
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2233 if (!_hr->obj_allocated_since_prev_marking(o)) { |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2234 size_t obj_size = o->size(); // Make sure we don't overflow |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2235 _live_bytes += (obj_size * HeapWordSize); |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2236 } |
342 | 2237 } |
2238 } | |
2239 size_t live_bytes() { return _live_bytes; } | |
2240 }; | |
2241 | |
2242 class PrintObjsInRegionClosure : public ObjectClosure { | |
2243 HeapRegion *_hr; | |
2244 G1CollectedHeap *_g1; | |
2245 public: | |
2246 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2247 _g1 = G1CollectedHeap::heap(); | |
2248 }; | |
2249 | |
2250 void do_object(oop o) { | |
2251 if (o != NULL) { | |
2252 HeapWord *start = (HeapWord *) o; | |
2253 size_t word_sz = o->size(); | |
2254 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2255 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2256 (void*) o, word_sz, | |
2257 _g1->isMarkedPrev(o), | |
2258 _g1->isMarkedNext(o), | |
2259 _hr->obj_allocated_since_prev_marking(o)); | |
2260 HeapWord *end = start + word_sz; | |
2261 HeapWord *cur; | |
2262 int *val; | |
2263 for (cur = start; cur < end; cur++) { | |
2264 val = (int *) cur; | |
2265 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2266 } | |
2267 } | |
2268 } | |
2269 }; | |
2270 | |
2271 class VerifyRegionClosure: public HeapRegionClosure { | |
811 | 2272 private: |
342 | 2273 bool _allow_dirty; |
390 | 2274 bool _par; |
811 | 2275 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2276 bool _failures; |
811 | 2277 public: |
2278 // use_prev_marking == true -> use "prev" marking information, | |
2279 // use_prev_marking == false -> use "next" marking information | |
2280 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2281 : _allow_dirty(allow_dirty), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2282 _par(par), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2283 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2284 _failures(false) {} |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2285 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2286 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2287 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2288 } |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2289 |
342 | 2290 bool doHeapRegion(HeapRegion* r) { |
390 | 2291 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2292 "Should be unclaimed at verify points."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2293 if (!r->continuesHumongous()) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2294 bool failures = false; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2295 r->verify(_allow_dirty, _use_prev_marking, &failures); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2296 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2297 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2298 } else { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2299 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2300 r->object_iterate(¬_dead_yet_cl); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2301 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2302 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2303 "max_live_bytes "SIZE_FORMAT" " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2304 "< calculated "SIZE_FORMAT, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2305 r->bottom(), r->end(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2306 r->max_live_bytes(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2307 not_dead_yet_cl.live_bytes()); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2308 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2309 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2310 } |
342 | 2311 } |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2312 return false; // stop the region iteration if we hit a failure |
342 | 2313 } |
2314 }; | |
2315 | |
2316 class VerifyRootsClosure: public OopsInGenClosure { | |
2317 private: | |
2318 G1CollectedHeap* _g1h; | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2319 bool _use_prev_marking; |
342 | 2320 bool _failures; |
2321 public: | |
811 | 2322 // use_prev_marking == true -> use "prev" marking information, |
2323 // use_prev_marking == false -> use "next" marking information | |
2324 VerifyRootsClosure(bool use_prev_marking) : | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2325 _g1h(G1CollectedHeap::heap()), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2326 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2327 _failures(false) { } |
342 | 2328 |
2329 bool failures() { return _failures; } | |
2330 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2331 template <class T> void do_oop_nv(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2332 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2333 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2334 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
811 | 2335 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { |
342 | 2336 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2337 "points to dead obj "PTR_FORMAT, p, (void*) obj); |
342 | 2338 obj->print_on(gclog_or_tty); |
2339 _failures = true; | |
2340 } | |
2341 } | |
2342 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2343 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2344 void do_oop(oop* p) { do_oop_nv(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2345 void do_oop(narrowOop* p) { do_oop_nv(p); } |
342 | 2346 }; |
2347 | |
390 | 2348 // This is the task used for parallel heap verification. |
2349 | |
2350 class G1ParVerifyTask: public AbstractGangTask { | |
2351 private: | |
2352 G1CollectedHeap* _g1h; | |
2353 bool _allow_dirty; | |
811 | 2354 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2355 bool _failures; |
390 | 2356 |
2357 public: | |
811 | 2358 // use_prev_marking == true -> use "prev" marking information, |
2359 // use_prev_marking == false -> use "next" marking information | |
2360 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, | |
2361 bool use_prev_marking) : | |
390 | 2362 AbstractGangTask("Parallel verify task"), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2363 _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2364 _allow_dirty(allow_dirty), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2365 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2366 _failures(false) { } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2367 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2368 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2369 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2370 } |
390 | 2371 |
2372 void work(int worker_i) { | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2373 HandleMark hm; |
811 | 2374 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); |
390 | 2375 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
2376 HeapRegion::ParVerifyClaimValue); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2377 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2378 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2379 } |
390 | 2380 } |
2381 }; | |
2382 | |
342 | 2383 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
811 | 2384 verify(allow_dirty, silent, /* use_prev_marking */ true); |
2385 } | |
2386 | |
2387 void G1CollectedHeap::verify(bool allow_dirty, | |
2388 bool silent, | |
2389 bool use_prev_marking) { | |
342 | 2390 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
2391 if (!silent) { gclog_or_tty->print("roots "); } | |
811 | 2392 VerifyRootsClosure rootsCl(use_prev_marking); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2393 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2394 process_strong_roots(true, // activate StrongRootsScope |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2395 false, |
342 | 2396 SharedHeap::SO_AllClasses, |
2397 &rootsCl, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2398 &blobsCl, |
342 | 2399 &rootsCl); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2400 bool failures = rootsCl.failures(); |
342 | 2401 rem_set()->invalidate(perm_gen()->used_region(), false); |
2402 if (!silent) { gclog_or_tty->print("heapRegions "); } | |
390 | 2403 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2404 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2405 "sanity check"); | |
2406 | |
811 | 2407 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
390 | 2408 int n_workers = workers()->total_workers(); |
2409 set_par_threads(n_workers); | |
2410 workers()->run_task(&task); | |
2411 set_par_threads(0); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2412 if (task.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2413 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2414 } |
390 | 2415 |
2416 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2417 "sanity check"); | |
2418 | |
2419 reset_heap_region_claim_values(); | |
2420 | |
2421 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2422 "sanity check"); | |
2423 } else { | |
811 | 2424 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
390 | 2425 _hrs->iterate(&blk); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2426 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2427 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2428 } |
390 | 2429 } |
342 | 2430 if (!silent) gclog_or_tty->print("remset "); |
2431 rem_set()->verify(); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2432 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2433 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2434 gclog_or_tty->print_cr("Heap:"); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2435 print_on(gclog_or_tty, true /* extended */); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2436 gclog_or_tty->print_cr(""); |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2437 #ifndef PRODUCT |
1044 | 2438 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
1388 | 2439 concurrent_mark()->print_reachable("at-verification-failure", |
2440 use_prev_marking, false /* all */); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2441 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2442 #endif |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2443 gclog_or_tty->flush(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2444 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2445 guarantee(!failures, "there should not have been any failures"); |
342 | 2446 } else { |
2447 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2448 } | |
2449 } | |
2450 | |
2451 class PrintRegionClosure: public HeapRegionClosure { | |
2452 outputStream* _st; | |
2453 public: | |
2454 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2455 bool doHeapRegion(HeapRegion* r) { | |
2456 r->print_on(_st); | |
2457 return false; | |
2458 } | |
2459 }; | |
2460 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2461 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 2462 |
2463 void G1CollectedHeap::print_on(outputStream* st) const { | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2464 print_on(st, PrintHeapAtGCExtended); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2465 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2466 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2467 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2468 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2469 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2470 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2471 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2472 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2473 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2474 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2475 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2476 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2477 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2478 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2479 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2480 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2481 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2482 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2483 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2484 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2485 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2486 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2487 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2488 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2489 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2490 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2491 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2492 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 2493 PrintRegionClosure blk(st); |
2494 _hrs->iterate(&blk); | |
2495 } | |
2496 | |
2497 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
2498 if (ParallelGCThreads > 0) { | |
1019 | 2499 workers()->print_worker_threads_on(st); |
2500 } | |
2501 | |
2502 _cmThread->print_on(st); | |
342 | 2503 st->cr(); |
1019 | 2504 |
2505 _cm->print_worker_threads_on(st); | |
2506 | |
2507 _cg1r->print_worker_threads_on(st); | |
2508 | |
342 | 2509 _czft->print_on(st); |
2510 st->cr(); | |
2511 } | |
2512 | |
2513 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
2514 if (ParallelGCThreads > 0) { | |
2515 workers()->threads_do(tc); | |
2516 } | |
2517 tc->do_thread(_cmThread); | |
794 | 2518 _cg1r->threads_do(tc); |
342 | 2519 tc->do_thread(_czft); |
2520 } | |
2521 | |
2522 void G1CollectedHeap::print_tracing_info() const { | |
2523 // We'll overload this to mean "trace GC pause statistics." | |
2524 if (TraceGen0Time || TraceGen1Time) { | |
2525 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2526 // to that. | |
2527 g1_policy()->print_tracing_info(); | |
2528 } | |
751 | 2529 if (G1SummarizeRSetStats) { |
342 | 2530 g1_rem_set()->print_summary_info(); |
2531 } | |
1282 | 2532 if (G1SummarizeConcMark) { |
342 | 2533 concurrent_mark()->print_summary_info(); |
2534 } | |
751 | 2535 if (G1SummarizeZFStats) { |
342 | 2536 ConcurrentZFThread::print_summary_info(); |
2537 } | |
2538 g1_policy()->print_yg_surv_rate_info(); | |
2539 | |
2540 SpecializationStats::print(); | |
2541 } | |
2542 | |
2543 | |
2544 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2545 HeapRegion* hr = heap_region_containing(addr); | |
2546 if (hr == NULL) { | |
2547 return 0; | |
2548 } else { | |
2549 return 1; | |
2550 } | |
2551 } | |
2552 | |
2553 G1CollectedHeap* G1CollectedHeap::heap() { | |
2554 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
2555 "not a garbage-first heap"); | |
2556 return _g1h; | |
2557 } | |
2558 | |
2559 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
2560 // always_do_update_barrier = false; |
342 | 2561 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
2562 // Call allocation profiler | |
2563 AllocationProfiler::iterate_since_last_gc(); | |
2564 // Fill TLAB's and such | |
2565 ensure_parsability(true); | |
2566 } | |
2567 | |
2568 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
2569 // FIXME: what is this about? | |
2570 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
2571 // is set. | |
2572 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
2573 "derived pointer present")); | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
2574 // always_do_update_barrier = true; |
342 | 2575 } |
2576 | |
2577 void G1CollectedHeap::do_collection_pause() { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2578 assert(Heap_lock->owned_by_self(), "we assume we'reholding the Heap_lock"); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2579 |
342 | 2580 // Read the GC count while holding the Heap_lock |
2581 // we need to do this _before_ wait_for_cleanup_complete(), to | |
2582 // ensure that we do not give up the heap lock and potentially | |
2583 // pick up the wrong count | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2584 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); |
342 | 2585 |
2586 // Don't want to do a GC pause while cleanup is being completed! | |
2587 wait_for_cleanup_complete(); | |
2588 | |
2589 g1_policy()->record_stop_world_start(); | |
2590 { | |
2591 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2592 VM_G1IncCollectionPause op(gc_count_before, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2593 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2594 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2595 GCCause::_g1_inc_collection_pause); |
342 | 2596 VMThread::execute(&op); |
2597 } | |
2598 } | |
2599 | |
2600 void | |
2601 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2602 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2603 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2604 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2605 CGC_lock->notify(); |
342 | 2606 } |
2607 } | |
2608 | |
2609 class VerifyMarkedObjsClosure: public ObjectClosure { | |
2610 G1CollectedHeap* _g1h; | |
2611 public: | |
2612 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
2613 void do_object(oop obj) { | |
2614 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
2615 "markandsweep mark should agree with concurrent deadness"); | |
2616 } | |
2617 }; | |
2618 | |
2619 void | |
2620 G1CollectedHeap::checkConcurrentMark() { | |
2621 VerifyMarkedObjsClosure verifycl(this); | |
2622 // MutexLockerEx x(getMarkBitMapLock(), | |
2623 // Mutex::_no_safepoint_check_flag); | |
678 | 2624 object_iterate(&verifycl, false); |
342 | 2625 } |
2626 | |
2627 void G1CollectedHeap::do_sync_mark() { | |
2628 _cm->checkpointRootsInitial(); | |
2629 _cm->markFromRoots(); | |
2630 _cm->checkpointRootsFinal(false); | |
2631 } | |
2632 | |
2633 // <NEW PREDICTION> | |
2634 | |
2635 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
2636 bool young) { | |
2637 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
2638 } | |
2639 | |
2640 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
2641 predicted_time_ms) { | |
2642 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
2643 } | |
2644 | |
2645 size_t G1CollectedHeap::pending_card_num() { | |
2646 size_t extra_cards = 0; | |
2647 JavaThread *curr = Threads::first(); | |
2648 while (curr != NULL) { | |
2649 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
2650 extra_cards += dcq.size(); | |
2651 curr = curr->next(); | |
2652 } | |
2653 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2654 size_t buffer_size = dcqs.buffer_size(); | |
2655 size_t buffer_num = dcqs.completed_buffers_num(); | |
2656 return buffer_size * buffer_num + extra_cards; | |
2657 } | |
2658 | |
2659 size_t G1CollectedHeap::max_pending_card_num() { | |
2660 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2661 size_t buffer_size = dcqs.buffer_size(); | |
2662 size_t buffer_num = dcqs.completed_buffers_num(); | |
2663 int thread_num = Threads::number_of_threads(); | |
2664 return (buffer_num + thread_num) * buffer_size; | |
2665 } | |
2666 | |
2667 size_t G1CollectedHeap::cards_scanned() { | |
2668 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); | |
2669 return g1_rset->cardsScanned(); | |
2670 } | |
2671 | |
2672 void | |
2673 G1CollectedHeap::setup_surviving_young_words() { | |
2674 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
2675 size_t array_length = g1_policy()->young_cset_length(); | |
2676 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
2677 if (_surviving_young_words == NULL) { | |
2678 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
2679 "Not enough space for young surv words summary."); | |
2680 } | |
2681 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2682 #ifdef ASSERT |
342 | 2683 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2684 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2685 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2686 #endif // !ASSERT |
342 | 2687 } |
2688 | |
2689 void | |
2690 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
2691 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2692 size_t array_length = g1_policy()->young_cset_length(); | |
2693 for (size_t i = 0; i < array_length; ++i) | |
2694 _surviving_young_words[i] += surv_young_words[i]; | |
2695 } | |
2696 | |
2697 void | |
2698 G1CollectedHeap::cleanup_surviving_young_words() { | |
2699 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
2700 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
2701 _surviving_young_words = NULL; | |
2702 } | |
2703 | |
2704 // </NEW PREDICTION> | |
2705 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2706 struct PrepareForRSScanningClosure : public HeapRegionClosure { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2707 bool doHeapRegion(HeapRegion *r) { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2708 r->rem_set()->set_iter_claimed(0); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2709 return false; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2710 } |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2711 }; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2712 |
342 | 2713 void |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2714 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2715 if (GC_locker::check_active_before_gc()) { |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2716 return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2717 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2718 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2719 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2720 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2721 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2722 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2723 { |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2724 ResourceMark rm; |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2725 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2726 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2727 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2728 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2729 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2730 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2731 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2732 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2733 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2734 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2735 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2736 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2737 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2738 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2739 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2740 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2741 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2742 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2743 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2744 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2745 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2746 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2747 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2748 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2749 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2750 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2751 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2752 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2753 TraceMemoryManagerStats tms(false /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2754 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2755 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2756 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2757 guarantee(!is_gc_active(), "collection is not reentrant"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2758 assert(regions_accounted_for(), "Region leakage!"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2759 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2760 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2761 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2762 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2763 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2764 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2765 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2766 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2767 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2768 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2769 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2770 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2771 increment_total_collections(false /* full gc */); |
342 | 2772 |
2773 #if G1_REM_SET_LOGGING | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2774 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2775 print(); |
342 | 2776 #endif |
2777 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2778 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2779 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2780 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2781 gclog_or_tty->print(" VerifyBeforeGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2782 Universe::verify(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2783 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2784 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2785 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2786 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2787 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2788 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2789 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2790 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2791 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2792 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2793 // of the collection set!). |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2794 abandon_cur_alloc_region(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2795 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2796 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2797 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2798 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2799 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2800 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2801 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2802 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2803 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2804 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2805 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2806 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2807 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2808 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2809 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2810 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2811 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2812 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2813 #endif // YOUNG_LIST_VERBOSE |
342 | 2814 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2815 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2816 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2817 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2818 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2819 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2820 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2821 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2822 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2823 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2824 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2825 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2826 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2827 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2828 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2829 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2830 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2831 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2832 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2833 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2834 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2835 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2836 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2837 if (mark_in_progress()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2838 concurrent_mark()->newCSet(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2839 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2840 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2841 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2842 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2843 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2844 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2845 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2846 // Now choose the CS. We may abandon a pause if we find no |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2847 // region that will fit in the MMU pause. |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2848 bool abandoned = g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2849 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2850 // Nothing to do if we were unable to choose a collection set. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2851 if (!abandoned) { |
342 | 2852 #if G1_REM_SET_LOGGING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2853 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2854 print(); |
342 | 2855 #endif |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2856 PrepareForRSScanningClosure prepare_for_rs_scan; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2857 collection_set_iterate(&prepare_for_rs_scan); |
342 | 2858 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2859 setup_surviving_young_words(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2860 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2861 // Set up the gc allocation regions. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2862 get_gc_alloc_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2863 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2864 // Actually do the work... |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2865 evacuate_collection_set(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2866 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2867 free_collection_set(g1_policy()->collection_set()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2868 g1_policy()->clear_collection_set(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2869 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2870 cleanup_surviving_young_words(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2871 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2872 // Start a new incremental collection set for the next pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2873 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2874 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2875 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2876 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2877 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2878 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2879 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2880 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2881 _young_list->reset_sampled_info(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2882 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2883 // Don't check the whole heap at this point as the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2884 // GC alloc regions from this pause have been tagged |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2885 // as survivors and moved on to the survivor list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2886 // Survivor regions will fail the !is_young() check. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2887 assert(check_young_list_empty(false /* check_heap */), |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2888 "young list should be empty"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2889 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2890 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2891 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2892 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2893 #endif // YOUNG_LIST_VERBOSE |
342 | 2894 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2895 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2896 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2897 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2898 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2899 _young_list->reset_auxilary_lists(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2900 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2901 } else { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2902 // We have abandoned the current collection. This can only happen |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2903 // if we're not doing young or partially young collections, and |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2904 // we didn't find an old region that we're able to collect within |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2905 // the allowed time. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2906 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2907 assert(g1_policy()->collection_set() == NULL, "should be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2908 assert(_young_list->length() == 0, "because it should be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2909 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2910 // This should be a no-op. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2911 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2912 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2913 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2914 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2915 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2916 // Start a new incremental collection set for the next pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2917 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2918 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2919 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2920 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2921 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2922 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2923 |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2924 // This looks confusing, because the DPT should really be empty |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2925 // at this point -- since we have not done any collection work, |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2926 // there should not be any derived pointers in the table to update; |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2927 // however, there is some additional state in the DPT which is |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2928 // reset at the end of the (null) "gc" here via the following call. |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2929 // A better approach might be to split off that state resetting work |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2930 // into a separate method that asserts that the DPT is empty and call |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2931 // that here. That is deferred for now. |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2932 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
342 | 2933 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2934 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2935 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2936 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2937 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2938 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2939 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2940 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2941 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2942 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2943 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2944 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2945 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2946 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2947 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2948 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2949 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2950 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2951 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2952 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2953 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2954 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2955 } |
342 | 2956 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2957 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2958 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2959 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2960 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2961 #endif // YOUNG_LIST_VERBOSE |
342 | 2962 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2963 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2964 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2965 g1_policy()->record_pause_time_ms(pause_time_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2966 g1_policy()->record_collection_pause_end(abandoned); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2967 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2968 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2969 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2970 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2971 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2972 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2973 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2974 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2975 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2976 Universe::verify(false); |
342 | 2977 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2978 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2979 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2980 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2981 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2982 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2983 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2984 size_t bytes_before = capacity(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2985 expand(expand_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2986 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2987 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2988 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2989 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2990 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2991 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
2992 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
2993 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2994 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
2995 #endif |
342 | 2996 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2997 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2998 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2999 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3000 assert(verify_region_lists(), "Bad region lists."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3001 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3002 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3003 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3004 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3005 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3006 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3007 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3008 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3009 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3010 Universe::print_heap_after_gc(); |
342 | 3011 } |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3012 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3013 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3014 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3015 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3016 } |
342 | 3017 } |
3018 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3019 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3020 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3021 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3022 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3023 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3024 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3025 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3026 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3027 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3028 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3029 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3030 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3031 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3032 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3033 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3034 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3035 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3036 |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3037 |
342 | 3038 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
3039 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 3040 // make sure we don't call set_gc_alloc_region() multiple times on |
3041 // the same region | |
3042 assert(r == NULL || !r->is_gc_alloc_region(), | |
3043 "shouldn't already be a GC alloc region"); | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3044 assert(r == NULL || !r->isHumongous(), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3045 "humongous regions shouldn't be used as GC alloc regions"); |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3046 |
342 | 3047 HeapWord* original_top = NULL; |
3048 if (r != NULL) | |
3049 original_top = r->top(); | |
3050 | |
3051 // We will want to record the used space in r as being there before gc. | |
3052 // One we install it as a GC alloc region it's eligible for allocation. | |
3053 // So record it now and use it later. | |
3054 size_t r_used = 0; | |
3055 if (r != NULL) { | |
3056 r_used = r->used(); | |
3057 | |
3058 if (ParallelGCThreads > 0) { | |
3059 // need to take the lock to guard against two threads calling | |
3060 // get_gc_alloc_region concurrently (very unlikely but...) | |
3061 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3062 r->save_marks(); | |
3063 } | |
3064 } | |
3065 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
3066 _gc_alloc_regions[purpose] = r; | |
3067 if (old_alloc_region != NULL) { | |
3068 // Replace aliases too. | |
3069 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3070 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
3071 _gc_alloc_regions[ap] = r; | |
3072 } | |
3073 } | |
3074 } | |
3075 if (r != NULL) { | |
3076 push_gc_alloc_region(r); | |
3077 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
3078 // We are using a region as a GC alloc region after it has been used | |
3079 // as a mutator allocation region during the current marking cycle. | |
3080 // The mutator-allocated objects are currently implicitly marked, but | |
3081 // when we move hr->next_top_at_mark_start() forward at the the end | |
3082 // of the GC pause, they won't be. We therefore mark all objects in | |
3083 // the "gap". We do this object-by-object, since marking densely | |
3084 // does not currently work right with marking bitmap iteration. This | |
3085 // means we rely on TLAB filling at the start of pauses, and no | |
3086 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
3087 // to fix the marking bitmap iteration. | |
3088 HeapWord* curhw = r->next_top_at_mark_start(); | |
3089 HeapWord* t = original_top; | |
3090 | |
3091 while (curhw < t) { | |
3092 oop cur = (oop)curhw; | |
3093 // We'll assume parallel for generality. This is rare code. | |
3094 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
3095 curhw = curhw + cur->size(); | |
3096 } | |
3097 assert(curhw == t, "Should have parsed correctly."); | |
3098 } | |
3099 if (G1PolicyVerbose > 1) { | |
3100 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
3101 "for survivors:", r->bottom(), original_top, r->end()); | |
3102 r->print(); | |
3103 } | |
3104 g1_policy()->record_before_bytes(r_used); | |
3105 } | |
3106 } | |
3107 | |
3108 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
3109 assert(Thread::current()->is_VM_thread() || | |
3110 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); | |
3111 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), | |
3112 "Precondition."); | |
3113 hr->set_is_gc_alloc_region(true); | |
3114 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
3115 _gc_alloc_region_list = hr; | |
3116 } | |
3117 | |
#ifdef G1_DEBUG
// Debug-only closure: reports every region that is still tagged as a
// GC alloc region (there should be none outside a GC).
class FindGCAllocRegion: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_gc_alloc_region()) {
      gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
                             r->hrs_index(), r->bottom());
    }
    return false; // keep iterating over all regions
  }
};
#endif // G1_DEBUG
3130 | |
3131 void G1CollectedHeap::forget_alloc_region_list() { | |
3132 assert(Thread::current()->is_VM_thread(), "Precondition"); | |
3133 while (_gc_alloc_region_list != NULL) { | |
3134 HeapRegion* r = _gc_alloc_region_list; | |
3135 assert(r->is_gc_alloc_region(), "Invariant."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3136 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3137 // newly allocated data in order to be able to apply deferred updates |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3138 // before the GC is done for verification purposes (i.e to allow |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3139 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3140 // collection. |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3141 r->ContiguousSpace::set_saved_mark(); |
342 | 3142 _gc_alloc_region_list = r->next_gc_alloc_region(); |
3143 r->set_next_gc_alloc_region(NULL); | |
3144 r->set_is_gc_alloc_region(false); | |
545 | 3145 if (r->is_survivor()) { |
3146 if (r->is_empty()) { | |
3147 r->set_not_young(); | |
3148 } else { | |
3149 _young_list->add_survivor_region(r); | |
3150 } | |
3151 } | |
342 | 3152 if (r->is_empty()) { |
3153 ++_free_regions; | |
3154 } | |
3155 } | |
3156 #ifdef G1_DEBUG | |
3157 FindGCAllocRegion fa; | |
3158 heap_region_iterate(&fa); | |
3159 #endif // G1_DEBUG | |
3160 } | |
3161 | |
3162 | |
3163 bool G1CollectedHeap::check_gc_alloc_regions() { | |
3164 // TODO: allocation regions check | |
3165 return true; | |
3166 } | |
3167 | |
3168 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 3169 // First, let's check that the GC alloc region list is empty (it should) |
3170 assert(_gc_alloc_region_list == NULL, "invariant"); | |
3171 | |
342 | 3172 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 3173 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3174 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
636 | 3175 |
342 | 3176 // Create new GC alloc regions. |
636 | 3177 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
3178 _retained_gc_alloc_regions[ap] = NULL; | |
3179 | |
3180 if (alloc_region != NULL) { | |
3181 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
3182 | |
3183 // let's make sure that the GC alloc region is not tagged as such | |
3184 // outside a GC operation | |
3185 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
3186 | |
3187 if (alloc_region->in_collection_set() || | |
3188 alloc_region->top() == alloc_region->end() || | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3189 alloc_region->top() == alloc_region->bottom() || |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3190 alloc_region->isHumongous()) { |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3191 // we will discard the current GC alloc region if |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3192 // * it's in the collection set (it can happen!), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3193 // * it's already full (no point in using it), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3194 // * it's empty (this means that it was emptied during |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3195 // a cleanup and it should be on the free list now), or |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3196 // * it's humongous (this means that it was emptied |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3197 // during a cleanup and was added to the free list, but |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3198 // has been subseqently used to allocate a humongous |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3199 // object that may be less than the region size). |
636 | 3200 |
3201 alloc_region = NULL; | |
3202 } | |
3203 } | |
3204 | |
3205 if (alloc_region == NULL) { | |
3206 // we will get a new GC alloc region | |
342 | 3207 alloc_region = newAllocRegionWithExpansion(ap, 0); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3208 } else { |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3209 // the region was retained from the last collection |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3210 ++_gc_alloc_region_counts[ap]; |
1388 | 3211 if (G1PrintHeapRegions) { |
3212 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
3213 "top "PTR_FORMAT, | |
3214 alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); | |
3215 } | |
342 | 3216 } |
636 | 3217 |
342 | 3218 if (alloc_region != NULL) { |
636 | 3219 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 3220 set_gc_alloc_region(ap, alloc_region); |
3221 } | |
636 | 3222 |
3223 assert(_gc_alloc_regions[ap] == NULL || | |
3224 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
3225 "the GC alloc region should be tagged as such"); | |
3226 assert(_gc_alloc_regions[ap] == NULL || | |
3227 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
3228 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 3229 } |
3230 // Set alternative regions for allocation purposes that have reached | |
636 | 3231 // their limit. |
342 | 3232 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
3233 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
3234 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
3235 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
3236 } | |
3237 } | |
3238 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3239 } | |
3240 | |
636 | 3241 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 3242 // We keep a separate list of all regions that have been alloc regions in |
636 | 3243 // the current collection pause. Forget that now. This method will |
3244 // untag the GC alloc regions and tear down the GC alloc region | |
3245 // list. It's desirable that no regions are tagged as GC alloc | |
3246 // outside GCs. | |
342 | 3247 forget_alloc_region_list(); |
3248 | |
3249 // The current alloc regions contain objs that have survived | |
3250 // collection. Make them no longer GC alloc regions. | |
3251 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3252 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 3253 _retained_gc_alloc_regions[ap] = NULL; |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3254 _gc_alloc_region_counts[ap] = 0; |
636 | 3255 |
3256 if (r != NULL) { | |
3257 // we retain nothing on _gc_alloc_regions between GCs | |
3258 set_gc_alloc_region(ap, NULL); | |
3259 | |
3260 if (r->is_empty()) { | |
3261 // we didn't actually allocate anything in it; let's just put | |
3262 // it on the free list | |
342 | 3263 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
3264 r->set_zero_fill_complete(); | |
3265 put_free_region_on_list_locked(r); | |
636 | 3266 } else if (_retain_gc_alloc_region[ap] && !totally) { |
3267 // retain it so that we can use it at the beginning of the next GC | |
3268 _retained_gc_alloc_regions[ap] = r; | |
342 | 3269 } |
3270 } | |
636 | 3271 } |
3272 } | |
3273 | |
3274 #ifndef PRODUCT | |
3275 // Useful for debugging | |
3276 | |
3277 void G1CollectedHeap::print_gc_alloc_regions() { | |
3278 gclog_or_tty->print_cr("GC alloc regions"); | |
3279 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3280 HeapRegion* r = _gc_alloc_regions[ap]; | |
3281 if (r == NULL) { | |
3282 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3283 } else { | |
3284 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3285 ap, r->bottom(), r->used()); | |
3286 } | |
3287 } | |
3288 } | |
3289 #endif // PRODUCT | |
342 | 3290 |
3291 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3292 _drain_in_progress = false; | |
3293 set_evac_failure_closure(cl); | |
3294 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3295 } | |
3296 | |
3297 void G1CollectedHeap::finalize_for_evac_failure() { | |
3298 assert(_evac_failure_scan_stack != NULL && | |
3299 _evac_failure_scan_stack->length() == 0, | |
3300 "Postcondition"); | |
3301 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3302 delete _evac_failure_scan_stack; |
342 | 3303 _evac_failure_scan_stack = NULL; |
3304 } | |
3305 | |
3306 | |
3307 | |
3308 // *** Sequential G1 Evacuation | |
3309 | |
3310 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { | |
3311 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3312 // let the caller handle alloc failure | |
3313 if (alloc_region == NULL) return NULL; | |
3314 assert(isHumongous(word_size) || !alloc_region->isHumongous(), | |
3315 "Either the object is humongous or the region isn't"); | |
3316 HeapWord* block = alloc_region->allocate(word_size); | |
3317 if (block == NULL) { | |
3318 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); | |
3319 } | |
3320 return block; | |
3321 } | |
3322 | |
3323 class G1IsAliveClosure: public BoolObjectClosure { | |
3324 G1CollectedHeap* _g1; | |
3325 public: | |
3326 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3327 void do_object(oop p) { assert(false, "Do not call."); } | |
3328 bool do_object_b(oop p) { | |
3329 // It is reachable if it is outside the collection set, or is inside | |
3330 // and forwarded. | |
3331 | |
3332 #ifdef G1_DEBUG | |
3333 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3334 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3335 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3336 #endif // G1_DEBUG | |
3337 | |
3338 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3339 } | |
3340 }; | |
3341 | |
3342 class G1KeepAliveClosure: public OopClosure { | |
3343 G1CollectedHeap* _g1; | |
3344 public: | |
3345 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3346 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3347 void do_oop( oop* p) { |
342 | 3348 oop obj = *p; |
3349 #ifdef G1_DEBUG | |
3350 if (PrintGC && Verbose) { | |
3351 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3352 p, (void*) obj, (void*) *p); | |
3353 } | |
3354 #endif // G1_DEBUG | |
3355 | |
3356 if (_g1->obj_in_cs(obj)) { | |
3357 assert( obj->is_forwarded(), "invariant" ); | |
3358 *p = obj->forwardee(); | |
3359 #ifdef G1_DEBUG | |
3360 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3361 (void*) obj, (void*) *p); | |
3362 #endif // G1_DEBUG | |
3363 } | |
3364 } | |
3365 }; | |
3366 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3367 class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3368 private: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3369 G1CollectedHeap* _g1; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3370 DirtyCardQueue *_dcq; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3371 CardTableModRefBS* _ct_bs; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3372 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3373 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3374 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3375 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3376 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3377 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3378 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3379 template <class T> void do_oop_work(T* p) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3380 assert(_from->is_in_reserved(p), "paranoia"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3381 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3382 !_from->is_survivor()) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3383 size_t card_index = _ct_bs->index_for(p); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3384 if (_ct_bs->mark_card_deferred(card_index)) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3385 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3386 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3387 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3388 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3389 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3390 |
342 | 3391 class RemoveSelfPointerClosure: public ObjectClosure { |
3392 private: | |
3393 G1CollectedHeap* _g1; | |
3394 ConcurrentMark* _cm; | |
3395 HeapRegion* _hr; | |
3396 size_t _prev_marked_bytes; | |
3397 size_t _next_marked_bytes; | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3398 OopsInHeapRegionClosure *_cl; |
342 | 3399 public: |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3400 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3401 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3402 _next_marked_bytes(0), _cl(cl) {} |
342 | 3403 |
3404 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3405 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3406 | |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3407 // The original idea here was to coalesce evacuated and dead objects. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3408 // However that caused complications with the block offset table (BOT). |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3409 // In particular if there were two TLABs, one of them partially refined. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3410 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3411 // The BOT entries of the unrefined part of TLAB_2 point to the start |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3412 // of TLAB_2. If the last object of the TLAB_1 and the first object |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3413 // of TLAB_2 are coalesced, then the cards of the unrefined part |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3414 // would point into middle of the filler object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3415 // |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3416 // The current approach is to not coalesce and leave the BOT contents intact. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3417 void do_object(oop obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3418 if (obj->is_forwarded() && obj->forwardee() == obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3419 // The object failed to move. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3420 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3421 _cm->markPrev(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3422 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3423 _prev_marked_bytes += (obj->size() * HeapWordSize); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3424 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3425 _cm->markAndGrayObjectIfNecessary(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3426 } |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3427 obj->set_mark(markOopDesc::prototype()); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3428 // While we were processing RSet buffers during the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3429 // collection, we actually didn't scan any cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3430 // collection set, since we didn't want to update remebered |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3431 // sets with entries that point into the collection set, given |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3432 // that live objects fromthe collection set are about to move |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3433 // and such entries will be stale very soon. This change also |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3434 // dealt with a reliability issue which involved scanning a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3435 // card in the collection set and coming across an array that |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3436 // was being chunked and looking malformed. The problem is |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3437 // that, if evacuation fails, we might have remembered set |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3438 // entries missing given that we skipped cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3439 // collection set. So, we'll recreate such entries now. |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3440 obj->oop_iterate(_cl); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3441 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3442 } else { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3443 // The object has been either evacuated or is dead. Fill it with a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3444 // dummy object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3445 MemRegion mr((HeapWord*)obj, obj->size()); |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3446 CollectedHeap::fill_with_object(mr); |
342 | 3447 _cm->clearRangeBothMaps(mr); |
3448 } | |
3449 } | |
3450 }; | |
3451 | |
3452 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
1705 | 3453 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3454 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3455 UpdateRSetDeferred deferred_update(_g1h, &dcq); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3456 OopsInHeapRegionClosure *cl; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3457 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3458 cl = &deferred_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3459 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3460 cl = &immediate_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3461 } |
342 | 3462 HeapRegion* cur = g1_policy()->collection_set(); |
3463 while (cur != NULL) { | |
3464 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3465 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3466 RemoveSelfPointerClosure rspc(_g1h, cl); |
342 | 3467 if (cur->evacuation_failed()) { |
3468 assert(cur->in_collection_set(), "bad CS"); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3469 cl->set_region(cur); |
342 | 3470 cur->object_iterate(&rspc); |
3471 | |
3472 // A number of manipulations to make the TAMS be the current top, | |
3473 // and the marked bytes be the ones observed in the iteration. | |
3474 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
3475 // The comments below are the postconditions achieved by the | |
3476 // calls. Note especially the last such condition, which says that | |
3477 // the count of marked bytes has been properly restored. | |
3478 cur->note_start_of_marking(false); | |
3479 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3480 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
3481 // _next_marked_bytes == prev_marked_bytes. | |
3482 cur->note_end_of_marking(); | |
3483 // _prev_top_at_mark_start == top(), | |
3484 // _prev_marked_bytes == prev_marked_bytes | |
3485 } | |
3486 // If there is no mark in progress, we modified the _next variables | |
3487 // above needlessly, but harmlessly. | |
3488 if (_g1h->mark_in_progress()) { | |
3489 cur->note_start_of_marking(false); | |
3490 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3491 // _next_marked_bytes == next_marked_bytes. | |
3492 } | |
3493 | |
3494 // Now make sure the region has the right index in the sorted array. | |
3495 g1_policy()->note_change_in_marked_bytes(cur); | |
3496 } | |
3497 cur = cur->next_in_collection_set(); | |
3498 } | |
3499 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3500 | |
3501 // Now restore saved marks, if any. | |
3502 if (_objs_with_preserved_marks != NULL) { | |
3503 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3504 assert(_objs_with_preserved_marks->length() == | |
3505 _preserved_marks_of_objs->length(), "Both or none."); | |
3506 guarantee(_objs_with_preserved_marks->length() == | |
3507 _preserved_marks_of_objs->length(), "Both or none."); | |
3508 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3509 oop obj = _objs_with_preserved_marks->at(i); | |
3510 markOop m = _preserved_marks_of_objs->at(i); | |
3511 obj->set_mark(m); | |
3512 } | |
3513 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3514 delete _objs_with_preserved_marks; | |
3515 delete _preserved_marks_of_objs; | |
3516 _objs_with_preserved_marks = NULL; | |
3517 _preserved_marks_of_objs = NULL; | |
3518 } | |
3519 } | |
3520 | |
3521 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
3522 _evac_failure_scan_stack->push(obj); | |
3523 } | |
3524 | |
3525 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3526 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3527 | |
3528 while (_evac_failure_scan_stack->length() > 0) { | |
3529 oop obj = _evac_failure_scan_stack->pop(); | |
3530 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3531 obj->oop_iterate_backwards(_evac_failure_closure); | |
3532 } | |
3533 } | |
3534 | |
3535 void G1CollectedHeap::handle_evacuation_failure(oop old) { | |
3536 markOop m = old->mark(); | |
3537 // forward to self | |
3538 assert(!old->is_forwarded(), "precondition"); | |
3539 | |
3540 old->forward_to(old); | |
3541 handle_evacuation_failure_common(old, m); | |
3542 } | |
3543 | |
3544 oop | |
3545 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
3546 oop old) { | |
3547 markOop m = old->mark(); | |
3548 oop forward_ptr = old->forward_to_atomic(old); | |
3549 if (forward_ptr == NULL) { | |
3550 // Forward-to-self succeeded. | |
3551 if (_evac_failure_closure != cl) { | |
3552 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
3553 assert(!_drain_in_progress, | |
3554 "Should only be true while someone holds the lock."); | |
3555 // Set the global evac-failure closure to the current thread's. | |
3556 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
3557 set_evac_failure_closure(cl); | |
3558 // Now do the common part. | |
3559 handle_evacuation_failure_common(old, m); | |
3560 // Reset to NULL. | |
3561 set_evac_failure_closure(NULL); | |
3562 } else { | |
3563 // The lock is already held, and this is recursive. | |
3564 assert(_drain_in_progress, "This should only be the recursive case."); | |
3565 handle_evacuation_failure_common(old, m); | |
3566 } | |
3567 return old; | |
3568 } else { | |
3569 // Someone else had a place to copy it. | |
3570 return forward_ptr; | |
3571 } | |
3572 } | |
3573 | |
3574 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
3575 set_evacuation_failed(true); | |
3576 | |
3577 preserve_mark_if_necessary(old, m); | |
3578 | |
3579 HeapRegion* r = heap_region_containing(old); | |
3580 if (!r->evacuation_failed()) { | |
3581 r->set_evacuation_failed(true); | |
1282 | 3582 if (G1PrintHeapRegions) { |
342 | 3583 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " |
3584 "["PTR_FORMAT","PTR_FORMAT")\n", | |
3585 r, r->bottom(), r->end()); | |
3586 } | |
3587 } | |
3588 | |
3589 push_on_evac_failure_scan_stack(old); | |
3590 | |
3591 if (!_drain_in_progress) { | |
3592 // prevent recursion in copy_to_survivor_space() | |
3593 _drain_in_progress = true; | |
3594 drain_evac_failure_scan_stack(); | |
3595 _drain_in_progress = false; | |
3596 } | |
3597 } | |
3598 | |
3599 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
3600 if (m != markOopDesc::prototype()) { | |
3601 if (_objs_with_preserved_marks == NULL) { | |
3602 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
3603 _objs_with_preserved_marks = | |
3604 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3605 _preserved_marks_of_objs = | |
3606 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
3607 } | |
3608 _objs_with_preserved_marks->push(obj); | |
3609 _preserved_marks_of_objs->push(m); | |
3610 } | |
3611 } | |
3612 | |
3613 // *** Parallel G1 Evacuation | |
3614 | |
3615 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
3616 size_t word_size) { | |
3617 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3618 // let the caller handle alloc failure | |
3619 if (alloc_region == NULL) return NULL; | |
3620 | |
3621 HeapWord* block = alloc_region->par_allocate(word_size); | |
3622 if (block == NULL) { | |
3623 MutexLockerEx x(par_alloc_during_gc_lock(), | |
3624 Mutex::_no_safepoint_check_flag); | |
3625 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
3626 } | |
3627 return block; | |
3628 } | |
3629 | |
545 | 3630 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
3631 bool par) { | |
3632 // Another thread might have obtained alloc_region for the given | |
3633 // purpose, and might be attempting to allocate in it, and might | |
3634 // succeed. Therefore, we can't do the "finalization" stuff on the | |
3635 // region below until we're sure the last allocation has happened. | |
3636 // We ensure this by allocating the remaining space with a garbage | |
3637 // object. | |
3638 if (par) par_allocate_remaining_space(alloc_region); | |
3639 // Now we can do the post-GC stuff on the region. | |
3640 alloc_region->note_end_of_copying(); | |
3641 g1_policy()->record_after_bytes(alloc_region->used()); | |
3642 } | |
3643 | |
342 | 3644 HeapWord* |
3645 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
3646 HeapRegion* alloc_region, | |
3647 bool par, | |
3648 size_t word_size) { | |
3649 HeapWord* block = NULL; | |
3650 // In the parallel case, a previous thread to obtain the lock may have | |
3651 // already assigned a new gc_alloc_region. | |
3652 if (alloc_region != _gc_alloc_regions[purpose]) { | |
3653 assert(par, "But should only happen in parallel case."); | |
3654 alloc_region = _gc_alloc_regions[purpose]; | |
3655 if (alloc_region == NULL) return NULL; | |
3656 block = alloc_region->par_allocate(word_size); | |
3657 if (block != NULL) return block; | |
3658 // Otherwise, continue; this new region is empty, too. | |
3659 } | |
3660 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 3661 retire_alloc_region(alloc_region, par); |
342 | 3662 |
3663 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
3664 // Cannot allocate more regions for the given purpose. | |
3665 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
3666 // Is there an alternative? | |
3667 if (purpose != alt_purpose) { | |
3668 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
3669 // Has not the alternative region been aliased? | |
545 | 3670 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 3671 // Try to allocate in the alternative region. |
3672 if (par) { | |
3673 block = alt_region->par_allocate(word_size); | |
3674 } else { | |
3675 block = alt_region->allocate(word_size); | |
3676 } | |
3677 // Make an alias. | |
3678 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 3679 if (block != NULL) { |
3680 return block; | |
3681 } | |
3682 retire_alloc_region(alt_region, par); | |
342 | 3683 } |
3684 // Both the allocation region and the alternative one are full | |
3685 // and aliased, replace them with a new allocation region. | |
3686 purpose = alt_purpose; | |
3687 } else { | |
3688 set_gc_alloc_region(purpose, NULL); | |
3689 return NULL; | |
3690 } | |
3691 } | |
3692 | |
3693 // Now allocate a new region for allocation. | |
3694 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); | |
3695 | |
3696 // let the caller handle alloc failure | |
3697 if (alloc_region != NULL) { | |
3698 | |
3699 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3700 assert(alloc_region->saved_mark_at_top(), | |
3701 "Mark should have been saved already."); | |
3702 // We used to assert that the region was zero-filled here, but no | |
3703 // longer. | |
3704 | |
3705 // This must be done last: once it's installed, other regions may | |
3706 // allocate in it (without holding the lock.) | |
3707 set_gc_alloc_region(purpose, alloc_region); | |
3708 | |
3709 if (par) { | |
3710 block = alloc_region->par_allocate(word_size); | |
3711 } else { | |
3712 block = alloc_region->allocate(word_size); | |
3713 } | |
3714 // Caller handles alloc failure. | |
3715 } else { | |
3716 // This sets other apis using the same old alloc region to NULL, also. | |
3717 set_gc_alloc_region(purpose, NULL); | |
3718 } | |
3719 return block; // May be NULL. | |
3720 } | |
3721 | |
3722 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
3723 HeapWord* block = NULL; | |
3724 size_t free_words; | |
3725 do { | |
3726 free_words = r->free()/HeapWordSize; | |
3727 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
3728 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 3729 // Otherwise, try to claim it. |
3730 block = r->par_allocate(free_words); | |
3731 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3732 fill_with_object(block, free_words); |
342 | 3733 } |
3734 | |
3735 #ifndef PRODUCT | |
3736 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
3737 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
3738 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
3739 return true; | |
3740 } | |
3741 #endif // PRODUCT | |
3742 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3743 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3744 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3745 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3746 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3747 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3748 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3749 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3750 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3751 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3752 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3753 _age_table(false), |
342 | 3754 #if G1_DETAILED_STATS |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3755 _pushes(0), _pops(0), _steals(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3756 _steal_attempts(0), _overflow_pushes(0), |
342 | 3757 #endif |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3758 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3759 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3760 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3761 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3762 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3763 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3764 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3765 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3766 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3767 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3768 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3769 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3770 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3771 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3772 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3773 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3774 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3775 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3776 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3777 _overflowed_refs = new OverflowQueue(10); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3778 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3779 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3780 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3781 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3782 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3783 } |
342 | 3784 |
3785 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : | |
3786 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
3787 _par_scan_state(par_scan_state) { } | |
3788 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3789 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 3790 // This is called _after_ do_oop_work has been called, hence after |
3791 // the object has been relocated to its new location and *p points | |
3792 // to its new location. | |
3793 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3794 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3795 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3796 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3797 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), |
342 | 3798 "shouldn't still be in the CSet if evacuation didn't fail."); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3799 HeapWord* addr = (HeapWord*)obj; |
342 | 3800 if (_g1->is_in_g1_reserved(addr)) |
3801 _cm->grayRoot(oop(addr)); | |
3802 } | |
3803 } | |
3804 | |
3805 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
3806 size_t word_sz = old->size(); | |
3807 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
3808 // +1 to make the -1 indexes valid... | |
3809 int young_index = from_region->young_index_in_cset()+1; | |
3810 assert( (from_region->is_young() && young_index > 0) || | |
3811 (!from_region->is_young() && young_index == 0), "invariant" ); | |
3812 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
3813 markOop m = old->mark(); | |
545 | 3814 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
3815 : m->age(); | |
3816 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 3817 word_sz); |
3818 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
3819 oop obj = oop(obj_ptr); | |
3820 | |
3821 if (obj_ptr == NULL) { | |
3822 // This will either forward-to-self, or detect that someone else has | |
3823 // installed a forwarding pointer. | |
3824 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
3825 return _g1->handle_evacuation_failure_par(cl, old); | |
3826 } | |
3827 | |
526 | 3828 // We're going to allocate linearly, so might as well prefetch ahead. |
3829 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
3830 | |
342 | 3831 oop forward_ptr = old->forward_to_atomic(obj); |
3832 if (forward_ptr == NULL) { | |
3833 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 3834 if (g1p->track_object_age(alloc_purpose)) { |
3835 // We could simply do obj->incr_age(). However, this causes a | |
3836 // performance issue. obj->incr_age() will first check whether | |
3837 // the object has a displaced mark by checking its mark word; | |
3838 // getting the mark word from the new location of the object | |
3839 // stalls. So, given that we already have the mark word and we | |
3840 // are about to install it anyway, it's better to increase the | |
3841 // age on the mark word, when the object does not have a | |
3842 // displaced mark word. We're not expecting many objects to have | |
3843 // a displaced marked word, so that case is not optimized | |
3844 // further (it could be...) and we simply call obj->incr_age(). | |
3845 | |
3846 if (m->has_displaced_mark_helper()) { | |
3847 // in this case, we have to install the mark word first, | |
3848 // otherwise obj looks to be forwarded (the old mark word, | |
3849 // which contains the forward pointer, was copied) | |
3850 obj->set_mark(m); | |
3851 obj->incr_age(); | |
3852 } else { | |
3853 m = m->incr_age(); | |
545 | 3854 obj->set_mark(m); |
526 | 3855 } |
545 | 3856 _par_scan_state->age_table()->add(obj, word_sz); |
3857 } else { | |
3858 obj->set_mark(m); | |
526 | 3859 } |
3860 | |
342 | 3861 // preserve "next" mark bit |
3862 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
3863 if (!use_local_bitmaps || | |
3864 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
3865 // if we couldn't mark it on the local bitmap (this happens when | |
3866 // the object was not allocated in the GCLab), we have to bite | |
3867 // the bullet and do the standard parallel mark | |
3868 _cm->markAndGrayObjectIfNecessary(obj); | |
3869 } | |
3870 #if 1 | |
3871 if (_g1->isMarkedNext(old)) { | |
3872 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
3873 } | |
3874 #endif | |
3875 } | |
3876 | |
3877 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
3878 surv_young_words[young_index] += word_sz; | |
3879 | |
3880 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
3881 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3882 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3883 _par_scan_state->push_on_queue(old_p); |
342 | 3884 } else { |
526 | 3885 // No point in using the slower heap_region_containing() method, |
3886 // given that we know obj is in the heap. | |
3887 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 3888 obj->oop_iterate_backwards(_scanner); |
3889 } | |
3890 } else { | |
3891 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
3892 obj = forward_ptr; | |
3893 } | |
3894 return obj; | |
3895 } | |
3896 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3897 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3898 template <class T> |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3899 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3900 ::do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3901 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 3902 assert(barrier != G1BarrierRS || obj != NULL, |
3903 "Precondition: G1BarrierRS implies obj is nonNull"); | |
3904 | |
526 | 3905 // here the null check is implicit in the cset_fast_test() test |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3906 if (_g1->in_cset_fast_test(obj)) { |
342 | 3907 #if G1_REM_SET_LOGGING |
526 | 3908 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
3909 "into CS.", p, (void*) obj); | |
342 | 3910 #endif |
526 | 3911 if (obj->is_forwarded()) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3912 oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
526 | 3913 } else { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3914 oop copy_oop = copy_to_survivor_space(obj); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3915 oopDesc::encode_store_heap_oop(p, copy_oop); |
342 | 3916 } |
526 | 3917 // When scanning the RS, we only care about objs in CS. |
3918 if (barrier == G1BarrierRS) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3919 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 3920 } |
526 | 3921 } |
3922 | |
3923 if (barrier == G1BarrierEvac && obj != NULL) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3924 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 3925 } |
3926 | |
3927 if (do_gen_barrier && obj != NULL) { | |
3928 par_do_barrier(p); | |
3929 } | |
3930 } | |
3931 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3932 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3933 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3934 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3935 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
526 | 3936 assert(has_partial_array_mask(p), "invariant"); |
3937 oop old = clear_partial_array_mask(p); | |
342 | 3938 assert(old->is_objArray(), "must be obj array"); |
3939 assert(old->is_forwarded(), "must be forwarded"); | |
3940 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
3941 | |
3942 objArrayOop obj = objArrayOop(old->forwardee()); | |
3943 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
3944 // Process ParGCArrayScanChunk elements now | |
3945 // and push the remainder back onto queue | |
3946 int start = arrayOop(old)->length(); | |
3947 int end = obj->length(); | |
3948 int remainder = end - start; | |
3949 assert(start <= end, "just checking"); | |
3950 if (remainder > 2 * ParGCArrayScanChunk) { | |
3951 // Test above combines last partial chunk with a full chunk | |
3952 end = start + ParGCArrayScanChunk; | |
3953 arrayOop(old)->set_length(end); | |
3954 // Push remainder. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3955 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3956 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3957 _par_scan_state->push_on_queue(old_p); |
342 | 3958 } else { |
3959 // Restore length so that the heap remains parsable in | |
3960 // case of evacuation failure. | |
3961 arrayOop(old)->set_length(end); | |
3962 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3963 _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
342 | 3964 // process our set of indices (include header in first chunk) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3965 obj->oop_iterate_range(&_scanner, start, end); |
342 | 3966 } |
3967 | |
3968 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
3969 protected: | |
3970 G1CollectedHeap* _g1h; | |
3971 G1ParScanThreadState* _par_scan_state; | |
3972 RefToScanQueueSet* _queues; | |
3973 ParallelTaskTerminator* _terminator; | |
3974 | |
3975 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
3976 RefToScanQueueSet* queues() { return _queues; } | |
3977 ParallelTaskTerminator* terminator() { return _terminator; } | |
3978 | |
3979 public: | |
3980 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
3981 G1ParScanThreadState* par_scan_state, | |
3982 RefToScanQueueSet* queues, | |
3983 ParallelTaskTerminator* terminator) | |
3984 : _g1h(g1h), _par_scan_state(par_scan_state), | |
3985 _queues(queues), _terminator(terminator) {} | |
3986 | |
3987 void do_void() { | |
3988 G1ParScanThreadState* pss = par_scan_state(); | |
3989 while (true) { | |
3990 pss->trim_queue(); | |
3991 IF_G1_DETAILED_STATS(pss->note_steal_attempt()); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3992 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3993 StarTask stolen_task; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3994 if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
342 | 3995 IF_G1_DETAILED_STATS(pss->note_steal()); |
526 | 3996 |
3997 // slightly paranoid tests; I'm trying to catch potential | |
3998 // problems before we go into push_on_queue to know where the | |
3999 // problem is coming from | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4000 assert((oop*)stolen_task != NULL, "Error"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4001 if (stolen_task.is_narrow()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4002 assert(UseCompressedOops, "Error"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4003 narrowOop* p = (narrowOop*) stolen_task; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4004 assert(has_partial_array_mask(p) || |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4005 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4006 pss->push_on_queue(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4007 } else { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4008 oop* p = (oop*) stolen_task; |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4009 assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4010 pss->push_on_queue(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4011 } |
342 | 4012 continue; |
4013 } | |
4014 pss->start_term_time(); | |
4015 if (terminator()->offer_termination()) break; | |
4016 pss->end_term_time(); | |
4017 } | |
4018 pss->end_term_time(); | |
4019 pss->retire_alloc_buffers(); | |
4020 } | |
4021 }; | |
4022 | |
4023 class G1ParTask : public AbstractGangTask { | |
4024 protected: | |
4025 G1CollectedHeap* _g1h; | |
4026 RefToScanQueueSet *_queues; | |
4027 ParallelTaskTerminator _terminator; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4028 int _n_workers; |
342 | 4029 |
4030 Mutex _stats_lock; | |
4031 Mutex* stats_lock() { return &_stats_lock; } | |
4032 | |
4033 size_t getNCards() { | |
4034 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
4035 / G1BlockOffsetSharedArray::N_bytes; | |
4036 } | |
4037 | |
4038 public: | |
4039 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
4040 : AbstractGangTask("G1 collection"), | |
4041 _g1h(g1h), | |
4042 _queues(task_queues), | |
4043 _terminator(workers, _queues), | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4044 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4045 _n_workers(workers) |
342 | 4046 {} |
4047 | |
4048 RefToScanQueueSet* queues() { return _queues; } | |
4049 | |
4050 RefToScanQueue *work_queue(int i) { | |
4051 return queues()->queue(i); | |
4052 } | |
4053 | |
4054 void work(int i) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4055 if (i >= _n_workers) return; // no work needed this round |
1611 | 4056 |
4057 double start_time_ms = os::elapsedTime() * 1000.0; | |
4058 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); | |
4059 | |
342 | 4060 ResourceMark rm; |
4061 HandleMark hm; | |
4062 | |
526 | 4063 G1ParScanThreadState pss(_g1h, i); |
4064 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
4065 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
4066 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 4067 |
4068 pss.set_evac_closure(&scan_evac_cl); | |
4069 pss.set_evac_failure_closure(&evac_failure_cl); | |
4070 pss.set_partial_scan_closure(&partial_scan_cl); | |
4071 | |
4072 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
4073 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
4074 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4075 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4076 |
342 | 4077 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
4078 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
4079 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
4080 | |
4081 OopsInHeapRegionClosure *scan_root_cl; | |
4082 OopsInHeapRegionClosure *scan_perm_cl; | |
4083 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
4084 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
342 | 4085 scan_root_cl = &scan_mark_root_cl; |
4086 scan_perm_cl = &scan_mark_perm_cl; | |
4087 } else { | |
4088 scan_root_cl = &only_scan_root_cl; | |
4089 scan_perm_cl = &only_scan_perm_cl; | |
4090 } | |
4091 | |
4092 pss.start_strong_roots(); | |
4093 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4094 SharedHeap::SO_AllClasses, | |
4095 scan_root_cl, | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4096 &push_heap_rs_cl, |
342 | 4097 scan_perm_cl, |
4098 i); | |
4099 pss.end_strong_roots(); | |
4100 { | |
4101 double start = os::elapsedTime(); | |
4102 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4103 evac.do_void(); | |
4104 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4105 double term_ms = pss.term_time()*1000.0; | |
4106 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
1611 | 4107 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); |
342 | 4108 } |
1282 | 4109 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
342 | 4110 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
4111 | |
4112 // Clean up any par-expanded rem sets. | |
4113 HeapRegionRemSet::par_cleanup(); | |
4114 | |
4115 MutexLocker x(stats_lock()); | |
4116 if (ParallelGCVerbose) { | |
4117 gclog_or_tty->print("Thread %d complete:\n", i); | |
4118 #if G1_DETAILED_STATS | |
4119 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", | |
4120 pss.pushes(), | |
4121 pss.pops(), | |
4122 pss.overflow_pushes(), | |
4123 pss.steals(), | |
4124 pss.steal_attempts()); | |
4125 #endif | |
4126 double elapsed = pss.elapsed(); | |
4127 double strong_roots = pss.strong_roots_time(); | |
4128 double term = pss.term_time(); | |
4129 gclog_or_tty->print(" Elapsed: %7.2f ms.\n" | |
4130 " Strong roots: %7.2f ms (%6.2f%%)\n" | |
1611 | 4131 " Termination: %7.2f ms (%6.2f%%) " |
4132 "(in "SIZE_FORMAT" entries)\n", | |
342 | 4133 elapsed * 1000.0, |
4134 strong_roots * 1000.0, (strong_roots*100.0/elapsed), | |
4135 term * 1000.0, (term*100.0/elapsed), | |
4136 pss.term_attempts()); | |
4137 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); | |
4138 gclog_or_tty->print(" Waste: %8dK\n" | |
4139 " Alloc Buffer: %8dK\n" | |
4140 " Undo: %8dK\n", | |
4141 (total_waste * HeapWordSize) / K, | |
4142 (pss.alloc_buffer_waste() * HeapWordSize) / K, | |
4143 (pss.undo_waste() * HeapWordSize) / K); | |
4144 } | |
4145 | |
4146 assert(pss.refs_to_scan() == 0, "Task queue should be empty"); | |
4147 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); | |
1611 | 4148 double end_time_ms = os::elapsedTime() * 1000.0; |
4149 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); | |
342 | 4150 } |
4151 }; | |
4152 | |
4153 // *** Common G1 Evacuation Stuff | |
4154 | |
4155 void | |
4156 G1CollectedHeap:: | |
4157 g1_process_strong_roots(bool collecting_perm_gen, | |
4158 SharedHeap::ScanningOption so, | |
4159 OopClosure* scan_non_heap_roots, | |
4160 OopsInHeapRegionClosure* scan_rs, | |
4161 OopsInGenClosure* scan_perm, | |
4162 int worker_i) { | |
4163 // First scan the strong roots, including the perm gen. | |
4164 double ext_roots_start = os::elapsedTime(); | |
4165 double closure_app_time_sec = 0.0; | |
4166 | |
4167 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4168 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4169 buf_scan_perm.set_generation(perm_gen()); | |
4170 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4171 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4172 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4173 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4174 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4175 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4176 collecting_perm_gen, so, |
342 | 4177 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4178 &eager_scan_code_roots, |
342 | 4179 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4180 |
342 | 4181 // Finish up any enqueued closure apps. |
4182 buf_scan_non_heap_roots.done(); | |
4183 buf_scan_perm.done(); | |
4184 double ext_roots_end = os::elapsedTime(); | |
4185 g1_policy()->reset_obj_copy_time(worker_i); | |
4186 double obj_copy_time_sec = | |
4187 buf_scan_non_heap_roots.closure_app_seconds() + | |
4188 buf_scan_perm.closure_app_seconds(); | |
4189 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4190 double ext_root_time_ms = | |
4191 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4192 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4193 | |
4194 // Scan strong roots in mark stack. | |
4195 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4196 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4197 } | |
4198 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4199 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4200 | |
4201 // XXX What should this be doing in the parallel case? | |
4202 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4203 // Now scan the complement of the collection set. | |
4204 if (scan_rs != NULL) { | |
4205 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4206 } | |
4207 // Finish with the ref_processor roots. | |
4208 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
4209 ref_processor()->oops_do(scan_non_heap_roots); | |
4210 } | |
4211 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4212 _process_strong_tasks->all_tasks_completed(); | |
4213 } | |
4214 | |
4215 void | |
4216 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4217 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4218 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4219 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4220 } |
4221 | |
4222 | |
4223 class SaveMarksClosure: public HeapRegionClosure { | |
4224 public: | |
4225 bool doHeapRegion(HeapRegion* r) { | |
4226 r->save_marks(); | |
4227 return false; | |
4228 } | |
4229 }; | |
4230 | |
4231 void G1CollectedHeap::save_marks() { | |
4232 if (ParallelGCThreads == 0) { | |
4233 SaveMarksClosure sm; | |
4234 heap_region_iterate(&sm); | |
4235 } | |
4236 // We do this even in the parallel case | |
4237 perm_gen()->save_marks(); | |
4238 } | |
4239 | |
4240 void G1CollectedHeap::evacuate_collection_set() { | |
4241 set_evacuation_failed(false); | |
4242 | |
4243 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4244 concurrent_g1_refine()->set_use_cache(false); | |
889 | 4245 concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
4246 | |
342 | 4247 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
4248 set_par_threads(n_workers); | |
4249 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4250 | |
4251 init_for_evac_failure(NULL); | |
4252 | |
4253 rem_set()->prepare_for_younger_refs_iterate(true); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4254 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4255 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
342 | 4256 double start_par = os::elapsedTime(); |
4257 if (ParallelGCThreads > 0) { | |
4258 // The individual threads will set their evac-failure closures. | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4259 StrongRootsScope srs(this); |
342 | 4260 workers()->run_task(&g1_par_task); |
4261 } else { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4262 StrongRootsScope srs(this); |
342 | 4263 g1_par_task.work(0); |
4264 } | |
4265 | |
4266 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4267 g1_policy()->record_par_time(par_time); | |
4268 set_par_threads(0); | |
4269 // Is this the right thing to do here? We don't save marks | |
4270 // on individual heap regions when we allocate from | |
4271 // them in parallel, so this seems like the correct place for this. | |
545 | 4272 retire_all_alloc_regions(); |
342 | 4273 { |
4274 G1IsAliveClosure is_alive(this); | |
4275 G1KeepAliveClosure keep_alive(this); | |
4276 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4277 } | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4278 release_gc_alloc_regions(false /* totally */); |
342 | 4279 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4280 |
889 | 4281 concurrent_g1_refine()->clear_hot_cache(); |
342 | 4282 concurrent_g1_refine()->set_use_cache(true); |
4283 | |
4284 finalize_for_evac_failure(); | |
4285 | |
4286 // Must do this before removing self-forwarding pointers, which clears | |
4287 // the per-region evac-failure flags. | |
4288 concurrent_mark()->complete_marking_in_collection_set(); | |
4289 | |
4290 if (evacuation_failed()) { | |
4291 remove_self_forwarding_pointers(); | |
4292 if (PrintGCDetails) { | |
4293 gclog_or_tty->print(" (evacuation failed)"); | |
4294 } else if (PrintGC) { | |
4295 gclog_or_tty->print("--"); | |
4296 } | |
4297 } | |
4298 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4299 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4300 RedirtyLoggedCardTableEntryFastClosure redirty; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4301 dirty_card_queue_set().set_closure(&redirty); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4302 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
1111 | 4303 |
4304 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); | |
4305 dcq.merge_bufferlists(&dirty_card_queue_set()); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4306 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4307 } |
342 | 4308 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
4309 } | |
4310 | |
4311 void G1CollectedHeap::free_region(HeapRegion* hr) { | |
4312 size_t pre_used = 0; | |
4313 size_t cleared_h_regions = 0; | |
4314 size_t freed_regions = 0; | |
4315 UncleanRegionList local_list; | |
4316 | |
4317 HeapWord* start = hr->bottom(); | |
4318 HeapWord* end = hr->prev_top_at_mark_start(); | |
4319 size_t used_bytes = hr->used(); | |
4320 size_t live_bytes = hr->max_live_bytes(); | |
4321 if (used_bytes > 0) { | |
4322 guarantee( live_bytes <= used_bytes, "invariant" ); | |
4323 } else { | |
4324 guarantee( live_bytes == 0, "invariant" ); | |
4325 } | |
4326 | |
4327 size_t garbage_bytes = used_bytes - live_bytes; | |
4328 if (garbage_bytes > 0) | |
4329 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); | |
4330 | |
4331 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, | |
4332 &local_list); | |
4333 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
4334 &local_list); | |
4335 } | |
4336 | |
4337 void | |
4338 G1CollectedHeap::free_region_work(HeapRegion* hr, | |
4339 size_t& pre_used, | |
4340 size_t& cleared_h_regions, | |
4341 size_t& freed_regions, | |
4342 UncleanRegionList* list, | |
4343 bool par) { | |
4344 pre_used += hr->used(); | |
4345 if (hr->isHumongous()) { | |
4346 assert(hr->startsHumongous(), | |
4347 "Only the start of a humongous region should be freed."); | |
4348 int ind = _hrs->find(hr); | |
4349 assert(ind != -1, "Should have an index."); | |
4350 // Clear the start region. | |
4351 hr->hr_clear(par, true /*clear_space*/); | |
4352 list->insert_before_head(hr); | |
4353 cleared_h_regions++; | |
4354 freed_regions++; | |
4355 // Clear any continued regions. | |
4356 ind++; | |
4357 while ((size_t)ind < n_regions()) { | |
4358 HeapRegion* hrc = _hrs->at(ind); | |
4359 if (!hrc->continuesHumongous()) break; | |
4360 // Otherwise, does continue the H region. | |
4361 assert(hrc->humongous_start_region() == hr, "Huh?"); | |
4362 hrc->hr_clear(par, true /*clear_space*/); | |
4363 cleared_h_regions++; | |
4364 freed_regions++; | |
4365 list->insert_before_head(hrc); | |
4366 ind++; | |
4367 } | |
4368 } else { | |
4369 hr->hr_clear(par, true /*clear_space*/); | |
4370 list->insert_before_head(hr); | |
4371 freed_regions++; | |
4372 // If we're using clear2, this should not be enabled. | |
4373 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); | |
4374 } | |
4375 } | |
4376 | |
4377 void G1CollectedHeap::finish_free_region_work(size_t pre_used, | |
4378 size_t cleared_h_regions, | |
4379 size_t freed_regions, | |
4380 UncleanRegionList* list) { | |
4381 if (list != NULL && list->sz() > 0) { | |
4382 prepend_region_list_on_unclean_list(list); | |
4383 } | |
4384 // Acquire a lock, if we're parallel, to update possibly-shared | |
4385 // variables. | |
4386 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; | |
4387 { | |
4388 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
4389 _summary_bytes_used -= pre_used; | |
4390 _num_humongous_regions -= (int) cleared_h_regions; | |
4391 _free_regions += freed_regions; | |
4392 } | |
4393 } | |
4394 | |
4395 | |
4396 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4397 while (list != NULL) { | |
4398 guarantee( list->is_young(), "invariant" ); | |
4399 | |
4400 HeapWord* bottom = list->bottom(); | |
4401 HeapWord* end = list->end(); | |
4402 MemRegion mr(bottom, end); | |
4403 ct_bs->dirty(mr); | |
4404 | |
4405 list = list->get_next_young_region(); | |
4406 } | |
4407 } | |
4408 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4409 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4410 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4411 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4412 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4413 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4414 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4415 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4416 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4417 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4418 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4419 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4420 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4421 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4422 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4423 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4424 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4425 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4426 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4427 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4428 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4429 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4430 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4431 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4432 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4433 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4434 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4435 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4436 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4437 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4438 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4439 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4440 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4441 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4442 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4443 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4444 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4445 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4446 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4447 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4448 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4449 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4450 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4451 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4452 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4453 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4454 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4455 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4456 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4457 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4458 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4459 class G1VerifyCardTableCleanup: public HeapRegionClosure { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4460 CardTableModRefBS* _ct_bs; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4461 public: |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4462 G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4463 : _ct_bs(ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4464 { } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4465 virtual bool doHeapRegion(HeapRegion* r) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4466 { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4467 MemRegion mr(r->bottom(), r->end()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4468 if (r->is_survivor()) { |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4469 _ct_bs->verify_dirty_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4470 } else { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4471 _ct_bs->verify_clean_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4472 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4473 return false; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4474 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4475 }; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4476 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4477 |
342 | 4478 void G1CollectedHeap::cleanUpCardTable() { |
4479 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
4480 double start = os::elapsedTime(); | |
4481 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4482 // Iterate over the dirty cards region list. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4483 G1ParCleanupCTTask cleanup_task(ct_bs, this, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4484 _young_list->first_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4485 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4486 if (ParallelGCThreads > 0) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4487 set_par_threads(workers()->total_workers()); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4488 workers()->run_task(&cleanup_task); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4489 set_par_threads(0); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4490 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4491 while (_dirty_cards_region_list) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4492 HeapRegion* r = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4493 cleanup_task.clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4494 _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4495 if (_dirty_cards_region_list == r) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4496 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4497 _dirty_cards_region_list = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4498 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4499 r->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4500 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4501 // now, redirty the cards of the survivor regions |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4502 // (it seemed faster to do it this way, instead of iterating over |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4503 // all regions and then clearing / dirtying as appropriate) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4504 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4505 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4506 |
342 | 4507 double elapsed = os::elapsedTime() - start; |
4508 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4509 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4510 if (G1VerifyCTCleanup || VerifyAfterGC) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4511 G1VerifyCardTableCleanup cleanup_verifier(ct_bs); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4512 heap_region_iterate(&cleanup_verifier); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4513 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4514 #endif |
342 | 4515 } |
4516 | |
4517 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { | |
4518 if (g1_policy()->should_do_collection_pause(word_size)) { | |
4519 do_collection_pause(); | |
4520 } | |
4521 } | |
4522 | |
4523 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
4524 double young_time_ms = 0.0; | |
4525 double non_young_time_ms = 0.0; | |
4526 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4527 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4528 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4529 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4530 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4531 |
342 | 4532 G1CollectorPolicy* policy = g1_policy(); |
4533 | |
4534 double start_sec = os::elapsedTime(); | |
4535 bool non_young = true; | |
4536 | |
4537 HeapRegion* cur = cs_head; | |
4538 int age_bound = -1; | |
4539 size_t rs_lengths = 0; | |
4540 | |
4541 while (cur != NULL) { | |
4542 if (non_young) { | |
4543 if (cur->is_young()) { | |
4544 double end_sec = os::elapsedTime(); | |
4545 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4546 non_young_time_ms += elapsed_ms; | |
4547 | |
4548 start_sec = os::elapsedTime(); | |
4549 non_young = false; | |
4550 } | |
4551 } else { | |
4552 if (!cur->is_on_free_list()) { | |
4553 double end_sec = os::elapsedTime(); | |
4554 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4555 young_time_ms += elapsed_ms; | |
4556 | |
4557 start_sec = os::elapsedTime(); | |
4558 non_young = true; | |
4559 } | |
4560 } | |
4561 | |
4562 rs_lengths += cur->rem_set()->occupied(); | |
4563 | |
4564 HeapRegion* next = cur->next_in_collection_set(); | |
4565 assert(cur->in_collection_set(), "bad CS"); | |
4566 cur->set_next_in_collection_set(NULL); | |
4567 cur->set_in_collection_set(false); | |
4568 | |
4569 if (cur->is_young()) { | |
4570 int index = cur->young_index_in_cset(); | |
4571 guarantee( index != -1, "invariant" ); | |
4572 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
4573 size_t words_survived = _surviving_young_words[index]; | |
4574 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4575 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4576 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4577 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4578 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4579 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4580 cur->set_next_young_region(NULL); |
342 | 4581 } else { |
4582 int index = cur->young_index_in_cset(); | |
4583 guarantee( index == -1, "invariant" ); | |
4584 } | |
4585 | |
4586 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
4587 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
4588 "invariant" ); | |
4589 | |
4590 if (!cur->evacuation_failed()) { | |
4591 // And the region is empty. | |
4592 assert(!cur->is_empty(), | |
4593 "Should not have empty regions in a CS."); | |
4594 free_region(cur); | |
4595 } else { | |
4596 cur->uninstall_surv_rate_group(); | |
4597 if (cur->is_young()) | |
4598 cur->set_young_index_in_cset(-1); | |
4599 cur->set_not_young(); | |
4600 cur->set_evacuation_failed(false); | |
4601 } | |
4602 cur = next; | |
4603 } | |
4604 | |
4605 policy->record_max_rs_lengths(rs_lengths); | |
4606 policy->cset_regions_freed(); | |
4607 | |
4608 double end_sec = os::elapsedTime(); | |
4609 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4610 if (non_young) | |
4611 non_young_time_ms += elapsed_ms; | |
4612 else | |
4613 young_time_ms += elapsed_ms; | |
4614 | |
4615 policy->record_young_free_cset_time_ms(young_time_ms); | |
4616 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
4617 } | |
4618 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4619 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4620 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4621 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4622 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4623 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4624 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4625 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4626 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4627 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4628 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4629 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4630 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4631 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4632 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4633 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4634 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4635 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4636 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4637 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4638 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4639 |
342 | 4640 HeapRegion* |
4641 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { | |
4642 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4643 HeapRegion* res = pop_unclean_region_list_locked(); | |
4644 if (res != NULL) { | |
4645 assert(!res->continuesHumongous() && | |
4646 res->zero_fill_state() != HeapRegion::Allocated, | |
4647 "Only free regions on unclean list."); | |
4648 if (zero_filled) { | |
4649 res->ensure_zero_filled_locked(); | |
4650 res->set_zero_fill_allocated(); | |
4651 } | |
4652 } | |
4653 return res; | |
4654 } | |
4655 | |
4656 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { | |
4657 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4658 return alloc_region_from_unclean_list_locked(zero_filled); | |
4659 } | |
4660 | |
4661 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { | |
4662 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4663 put_region_on_unclean_list_locked(r); | |
4664 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4665 } | |
4666 | |
4667 void G1CollectedHeap::set_unclean_regions_coming(bool b) { | |
4668 MutexLockerEx x(Cleanup_mon); | |
4669 set_unclean_regions_coming_locked(b); | |
4670 } | |
4671 | |
4672 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { | |
4673 assert(Cleanup_mon->owned_by_self(), "Precondition"); | |
4674 _unclean_regions_coming = b; | |
4675 // Wake up mutator threads that might be waiting for completeCleanup to | |
4676 // finish. | |
4677 if (!b) Cleanup_mon->notify_all(); | |
4678 } | |
4679 | |
4680 void G1CollectedHeap::wait_for_cleanup_complete() { | |
4681 MutexLockerEx x(Cleanup_mon); | |
4682 wait_for_cleanup_complete_locked(); | |
4683 } | |
4684 | |
4685 void G1CollectedHeap::wait_for_cleanup_complete_locked() { | |
4686 assert(Cleanup_mon->owned_by_self(), "precondition"); | |
4687 while (_unclean_regions_coming) { | |
4688 Cleanup_mon->wait(); | |
4689 } | |
4690 } | |
4691 | |
4692 void | |
4693 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { | |
4694 assert(ZF_mon->owned_by_self(), "precondition."); | |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
4695 #ifdef ASSERT |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
4696 if (r->is_gc_alloc_region()) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
4697 ResourceMark rm; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
4698 stringStream region_str; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
4699 print_on(®ion_str); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
4700 assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s", |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
4701 region_str.as_string())); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
4702 } |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
4703 #endif |
342 | 4704 _unclean_region_list.insert_before_head(r); |
4705 } | |
4706 | |
4707 void | |
4708 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { | |
4709 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4710 prepend_region_list_on_unclean_list_locked(list); | |
4711 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4712 } | |
4713 | |
4714 void | |
4715 G1CollectedHeap:: | |
4716 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { | |
4717 assert(ZF_mon->owned_by_self(), "precondition."); | |
4718 _unclean_region_list.prepend_list(list); | |
4719 } | |
4720 | |
4721 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { | |
4722 assert(ZF_mon->owned_by_self(), "precondition."); | |
4723 HeapRegion* res = _unclean_region_list.pop(); | |
4724 if (res != NULL) { | |
4725 // Inform ZF thread that there's a new unclean head. | |
4726 if (_unclean_region_list.hd() != NULL && should_zf()) | |
4727 ZF_mon->notify_all(); | |
4728 } | |
4729 return res; | |
4730 } | |
4731 | |
4732 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { | |
4733 assert(ZF_mon->owned_by_self(), "precondition."); | |
4734 return _unclean_region_list.hd(); | |
4735 } | |
4736 | |
4737 | |
4738 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { | |
4739 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4740 HeapRegion* r = peek_unclean_region_list_locked(); | |
4741 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { | |
4742 // Result of below must be equal to "r", since we hold the lock. | |
4743 (void)pop_unclean_region_list_locked(); | |
4744 put_free_region_on_list_locked(r); | |
4745 return true; | |
4746 } else { | |
4747 return false; | |
4748 } | |
4749 } | |
4750 | |
4751 bool G1CollectedHeap::move_cleaned_region_to_free_list() { | |
4752 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4753 return move_cleaned_region_to_free_list_locked(); | |
4754 } | |
4755 | |
4756 | |
4757 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { | |
4758 assert(ZF_mon->owned_by_self(), "precondition."); | |
4759 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4760 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, | |
4761 "Regions on free list must be zero filled"); | |
4762 assert(!r->isHumongous(), "Must not be humongous."); | |
4763 assert(r->is_empty(), "Better be empty"); | |
4764 assert(!r->is_on_free_list(), | |
4765 "Better not already be on free list"); | |
4766 assert(!r->is_on_unclean_list(), | |
4767 "Better not already be on unclean list"); | |
4768 r->set_on_free_list(true); | |
4769 r->set_next_on_free_list(_free_region_list); | |
4770 _free_region_list = r; | |
4771 _free_region_list_size++; | |
4772 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4773 } | |
4774 | |
4775 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { | |
4776 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4777 put_free_region_on_list_locked(r); | |
4778 } | |
4779 | |
4780 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { | |
4781 assert(ZF_mon->owned_by_self(), "precondition."); | |
4782 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4783 HeapRegion* res = _free_region_list; | |
4784 if (res != NULL) { | |
4785 _free_region_list = res->next_from_free_list(); | |
4786 _free_region_list_size--; | |
4787 res->set_on_free_list(false); | |
4788 res->set_next_on_free_list(NULL); | |
4789 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4790 } | |
4791 return res; | |
4792 } | |
4793 | |
4794 | |
4795 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { | |
4796 // By self, or on behalf of self. | |
4797 assert(Heap_lock->is_locked(), "Precondition"); | |
4798 HeapRegion* res = NULL; | |
4799 bool first = true; | |
4800 while (res == NULL) { | |
4801 if (zero_filled || !first) { | |
4802 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4803 res = pop_free_region_list_locked(); | |
4804 if (res != NULL) { | |
4805 assert(!res->zero_fill_is_allocated(), | |
4806 "No allocated regions on free list."); | |
4807 res->set_zero_fill_allocated(); | |
4808 } else if (!first) { | |
4809 break; // We tried both, time to return NULL. | |
4810 } | |
4811 } | |
4812 | |
4813 if (res == NULL) { | |
4814 res = alloc_region_from_unclean_list(zero_filled); | |
4815 } | |
4816 assert(res == NULL || | |
4817 !zero_filled || | |
4818 res->zero_fill_is_allocated(), | |
4819 "We must have allocated the region we're returning"); | |
4820 first = false; | |
4821 } | |
4822 return res; | |
4823 } | |
4824 | |
4825 void G1CollectedHeap::remove_allocated_regions_from_lists() { | |
4826 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4827 { | |
4828 HeapRegion* prev = NULL; | |
4829 HeapRegion* cur = _unclean_region_list.hd(); | |
4830 while (cur != NULL) { | |
4831 HeapRegion* next = cur->next_from_unclean_list(); | |
4832 if (cur->zero_fill_is_allocated()) { | |
4833 // Remove from the list. | |
4834 if (prev == NULL) { | |
4835 (void)_unclean_region_list.pop(); | |
4836 } else { | |
4837 _unclean_region_list.delete_after(prev); | |
4838 } | |
4839 cur->set_on_unclean_list(false); | |
4840 cur->set_next_on_unclean_list(NULL); | |
4841 } else { | |
4842 prev = cur; | |
4843 } | |
4844 cur = next; | |
4845 } | |
4846 assert(_unclean_region_list.sz() == unclean_region_list_length(), | |
4847 "Inv"); | |
4848 } | |
4849 | |
4850 { | |
4851 HeapRegion* prev = NULL; | |
4852 HeapRegion* cur = _free_region_list; | |
4853 while (cur != NULL) { | |
4854 HeapRegion* next = cur->next_from_free_list(); | |
4855 if (cur->zero_fill_is_allocated()) { | |
4856 // Remove from the list. | |
4857 if (prev == NULL) { | |
4858 _free_region_list = cur->next_from_free_list(); | |
4859 } else { | |
4860 prev->set_next_on_free_list(cur->next_from_free_list()); | |
4861 } | |
4862 cur->set_on_free_list(false); | |
4863 cur->set_next_on_free_list(NULL); | |
4864 _free_region_list_size--; | |
4865 } else { | |
4866 prev = cur; | |
4867 } | |
4868 cur = next; | |
4869 } | |
4870 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4871 } | |
4872 } | |
4873 | |
4874 bool G1CollectedHeap::verify_region_lists() { | |
4875 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4876 return verify_region_lists_locked(); | |
4877 } | |
4878 | |
4879 bool G1CollectedHeap::verify_region_lists_locked() { | |
4880 HeapRegion* unclean = _unclean_region_list.hd(); | |
4881 while (unclean != NULL) { | |
4882 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); | |
4883 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); | |
4884 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, | |
4885 "Everything else is possible."); | |
4886 unclean = unclean->next_from_unclean_list(); | |
4887 } | |
4888 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); | |
4889 | |
4890 HeapRegion* free_r = _free_region_list; | |
4891 while (free_r != NULL) { | |
4892 assert(free_r->is_on_free_list(), "Well, it is!"); | |
4893 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); | |
4894 switch (free_r->zero_fill_state()) { | |
4895 case HeapRegion::NotZeroFilled: | |
4896 case HeapRegion::ZeroFilling: | |
4897 guarantee(false, "Should not be on free list."); | |
4898 break; | |
4899 default: | |
4900 // Everything else is possible. | |
4901 break; | |
4902 } | |
4903 free_r = free_r->next_from_free_list(); | |
4904 } | |
4905 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); | |
4906 // If we didn't do an assertion... | |
4907 return true; | |
4908 } | |
4909 | |
4910 size_t G1CollectedHeap::free_region_list_length() { | |
4911 assert(ZF_mon->owned_by_self(), "precondition."); | |
4912 size_t len = 0; | |
4913 HeapRegion* cur = _free_region_list; | |
4914 while (cur != NULL) { | |
4915 len++; | |
4916 cur = cur->next_from_free_list(); | |
4917 } | |
4918 return len; | |
4919 } | |
4920 | |
4921 size_t G1CollectedHeap::unclean_region_list_length() { | |
4922 assert(ZF_mon->owned_by_self(), "precondition."); | |
4923 return _unclean_region_list.length(); | |
4924 } | |
4925 | |
4926 size_t G1CollectedHeap::n_regions() { | |
4927 return _hrs->length(); | |
4928 } | |
4929 | |
4930 size_t G1CollectedHeap::max_regions() { | |
4931 return | |
4932 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / | |
4933 HeapRegion::GrainBytes; | |
4934 } | |
4935 | |
4936 size_t G1CollectedHeap::free_regions() { | |
4937 /* Possibly-expensive assert. | |
4938 assert(_free_regions == count_free_regions(), | |
4939 "_free_regions is off."); | |
4940 */ | |
4941 return _free_regions; | |
4942 } | |
4943 | |
4944 bool G1CollectedHeap::should_zf() { | |
4945 return _free_region_list_size < (size_t) G1ConcZFMaxRegions; | |
4946 } | |
4947 | |
4948 class RegionCounter: public HeapRegionClosure { | |
4949 size_t _n; | |
4950 public: | |
4951 RegionCounter() : _n(0) {} | |
4952 bool doHeapRegion(HeapRegion* r) { | |
677 | 4953 if (r->is_empty()) { |
342 | 4954 assert(!r->isHumongous(), "H regions should not be empty."); |
4955 _n++; | |
4956 } | |
4957 return false; | |
4958 } | |
4959 int res() { return (int) _n; } | |
4960 }; | |
4961 | |
4962 size_t G1CollectedHeap::count_free_regions() { | |
4963 RegionCounter rc; | |
4964 heap_region_iterate(&rc); | |
4965 size_t n = rc.res(); | |
4966 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) | |
4967 n--; | |
4968 return n; | |
4969 } | |
4970 | |
4971 size_t G1CollectedHeap::count_free_regions_list() { | |
4972 size_t n = 0; | |
4973 size_t o = 0; | |
4974 ZF_mon->lock_without_safepoint_check(); | |
4975 HeapRegion* cur = _free_region_list; | |
4976 while (cur != NULL) { | |
4977 cur = cur->next_from_free_list(); | |
4978 n++; | |
4979 } | |
4980 size_t m = unclean_region_list_length(); | |
4981 ZF_mon->unlock(); | |
4982 return n + m; | |
4983 } | |
4984 | |
4985 bool G1CollectedHeap::should_set_young_locked() { | |
4986 assert(heap_lock_held_for_gc(), | |
4987 "the heap lock should already be held by or for this thread"); | |
4988 return (g1_policy()->in_young_gc_mode() && | |
4989 g1_policy()->should_add_next_region_to_young_list()); | |
4990 } | |
4991 | |
4992 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
4993 assert(heap_lock_held_for_gc(), | |
4994 "the heap lock should already be held by or for this thread"); | |
4995 _young_list->push_region(hr); | |
4996 g1_policy()->set_region_short_lived(hr); | |
4997 } | |
4998 | |
4999 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5000 private: | |
5001 bool _success; | |
5002 public: | |
5003 NoYoungRegionsClosure() : _success(true) { } | |
5004 bool doHeapRegion(HeapRegion* r) { | |
5005 if (r->is_young()) { | |
5006 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5007 r->bottom(), r->end()); | |
5008 _success = false; | |
5009 } | |
5010 return false; | |
5011 } | |
5012 bool success() { return _success; } | |
5013 }; | |
5014 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5015 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5016 bool ret = _young_list->check_list_empty(check_sample); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5017 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5018 if (check_heap) { |
342 | 5019 NoYoungRegionsClosure closure; |
5020 heap_region_iterate(&closure); | |
5021 ret = ret && closure.success(); | |
5022 } | |
5023 | |
5024 return ret; | |
5025 } | |
5026 | |
5027 void G1CollectedHeap::empty_young_list() { | |
5028 assert(heap_lock_held_for_gc(), | |
5029 "the heap lock should already be held by or for this thread"); | |
5030 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
5031 | |
5032 _young_list->empty_list(); | |
5033 } | |
5034 | |
5035 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5036 bool no_allocs = true; | |
5037 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5038 HeapRegion* r = _gc_alloc_regions[ap]; | |
5039 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5040 } | |
5041 return no_allocs; | |
5042 } | |
5043 | |
545 | 5044 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5045 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5046 HeapRegion* r = _gc_alloc_regions[ap]; | |
5047 if (r != NULL) { | |
5048 // Check for aliases. | |
5049 bool has_processed_alias = false; | |
5050 for (int i = 0; i < ap; ++i) { | |
5051 if (_gc_alloc_regions[i] == r) { | |
5052 has_processed_alias = true; | |
5053 break; | |
5054 } | |
5055 } | |
5056 if (!has_processed_alias) { | |
545 | 5057 retire_alloc_region(r, false /* par */); |
342 | 5058 } |
5059 } | |
5060 } | |
5061 } | |
5062 | |
5063 | |
5064 // Done at the start of full GC. | |
5065 void G1CollectedHeap::tear_down_region_lists() { | |
5066 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5067 while (pop_unclean_region_list_locked() != NULL) ; | |
5068 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, | |
1489
cff162798819
6888953: some calls to function-like macros are missing semicolons
jcoomes
parents:
1394
diff
changeset
|
5069 "Postconditions of loop."); |
342 | 5070 while (pop_free_region_list_locked() != NULL) ; |
5071 assert(_free_region_list == NULL, "Postcondition of loop."); | |
5072 if (_free_region_list_size != 0) { | |
5073 gclog_or_tty->print_cr("Size is %d.", _free_region_list_size); | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
5074 print_on(gclog_or_tty, true /* extended */); |
342 | 5075 } |
5076 assert(_free_region_list_size == 0, "Postconditions of loop."); | |
5077 } | |
5078 | |
5079 | |
5080 class RegionResetter: public HeapRegionClosure { | |
5081 G1CollectedHeap* _g1; | |
5082 int _n; | |
5083 public: | |
5084 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5085 bool doHeapRegion(HeapRegion* r) { | |
5086 if (r->continuesHumongous()) return false; | |
5087 if (r->top() > r->bottom()) { | |
5088 if (r->top() < r->end()) { | |
5089 Copy::fill_to_words(r->top(), | |
5090 pointer_delta(r->end(), r->top())); | |
5091 } | |
5092 r->set_zero_fill_allocated(); | |
5093 } else { | |
5094 assert(r->is_empty(), "tautology"); | |
677 | 5095 _n++; |
5096 switch (r->zero_fill_state()) { | |
342 | 5097 case HeapRegion::NotZeroFilled: |
5098 case HeapRegion::ZeroFilling: | |
5099 _g1->put_region_on_unclean_list_locked(r); | |
5100 break; | |
5101 case HeapRegion::Allocated: | |
5102 r->set_zero_fill_complete(); | |
5103 // no break; go on to put on free list. | |
5104 case HeapRegion::ZeroFilled: | |
5105 _g1->put_free_region_on_list_locked(r); | |
5106 break; | |
5107 } | |
5108 } | |
5109 return false; | |
5110 } | |
5111 | |
5112 int getFreeRegionCount() {return _n;} | |
5113 }; | |
5114 | |
5115 // Done at the end of full GC. | |
5116 void G1CollectedHeap::rebuild_region_lists() { | |
5117 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5118 // This needs to go at the end of the full GC. | |
5119 RegionResetter rs; | |
5120 heap_region_iterate(&rs); | |
5121 _free_regions = rs.getFreeRegionCount(); | |
5122 // Tell the ZF thread it may have work to do. | |
5123 if (should_zf()) ZF_mon->notify_all(); | |
5124 } | |
5125 | |
5126 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { | |
5127 G1CollectedHeap* _g1; | |
5128 int _n; | |
5129 public: | |
5130 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5131 bool doHeapRegion(HeapRegion* r) { | |
5132 if (r->continuesHumongous()) return false; | |
5133 if (r->top() > r->bottom()) { | |
5134 // There are assertions in "set_zero_fill_needed()" below that | |
5135 // require top() == bottom(), so this is technically illegal. | |
5136 // We'll skirt the law here, by making that true temporarily. | |
5137 DEBUG_ONLY(HeapWord* save_top = r->top(); | |
5138 r->set_top(r->bottom())); | |
5139 r->set_zero_fill_needed(); | |
5140 DEBUG_ONLY(r->set_top(save_top)); | |
5141 } | |
5142 return false; | |
5143 } | |
5144 }; | |
5145 | |
5146 // Done at the start of full GC. | |
5147 void G1CollectedHeap::set_used_regions_to_need_zero_fill() { | |
5148 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5149 // This needs to go at the end of the full GC. | |
5150 UsedRegionsNeedZeroFillSetter rs; | |
5151 heap_region_iterate(&rs); | |
5152 } | |
5153 | |
5154 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
5155 _refine_cte_cl->set_concurrent(concurrent); | |
5156 } | |
5157 | |
5158 #ifndef PRODUCT | |
5159 | |
5160 class PrintHeapRegionClosure: public HeapRegionClosure { | |
5161 public: | |
5162 bool doHeapRegion(HeapRegion *r) { | |
5163 gclog_or_tty->print("Region: "PTR_FORMAT":", r); | |
5164 if (r != NULL) { | |
5165 if (r->is_on_free_list()) | |
5166 gclog_or_tty->print("Free "); | |
5167 if (r->is_young()) | |
5168 gclog_or_tty->print("Young "); | |
5169 if (r->isHumongous()) | |
5170 gclog_or_tty->print("Is Humongous "); | |
5171 r->print(); | |
5172 } | |
5173 return false; | |
5174 } | |
5175 }; | |
5176 | |
5177 class SortHeapRegionClosure : public HeapRegionClosure { | |
5178 size_t young_regions,free_regions, unclean_regions; | |
5179 size_t hum_regions, count; | |
5180 size_t unaccounted, cur_unclean, cur_alloc; | |
5181 size_t total_free; | |
5182 HeapRegion* cur; | |
5183 public: | |
5184 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), | |
5185 free_regions(0), unclean_regions(0), | |
5186 hum_regions(0), | |
5187 count(0), unaccounted(0), | |
5188 cur_alloc(0), total_free(0) | |
5189 {} | |
5190 bool doHeapRegion(HeapRegion *r) { | |
5191 count++; | |
5192 if (r->is_on_free_list()) free_regions++; | |
5193 else if (r->is_on_unclean_list()) unclean_regions++; | |
5194 else if (r->isHumongous()) hum_regions++; | |
5195 else if (r->is_young()) young_regions++; | |
5196 else if (r == cur) cur_alloc++; | |
5197 else unaccounted++; | |
5198 return false; | |
5199 } | |
5200 void print() { | |
5201 total_free = free_regions + unclean_regions; | |
5202 gclog_or_tty->print("%d regions\n", count); | |
5203 gclog_or_tty->print("%d free: free_list = %d unclean = %d\n", | |
5204 total_free, free_regions, unclean_regions); | |
5205 gclog_or_tty->print("%d humongous %d young\n", | |
5206 hum_regions, young_regions); | |
5207 gclog_or_tty->print("%d cur_alloc\n", cur_alloc); | |
5208 gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted); | |
5209 } | |
5210 }; | |
5211 | |
5212 void G1CollectedHeap::print_region_counts() { | |
5213 SortHeapRegionClosure sc(_cur_alloc_region); | |
5214 PrintHeapRegionClosure cl; | |
5215 heap_region_iterate(&cl); | |
5216 heap_region_iterate(&sc); | |
5217 sc.print(); | |
5218 print_region_accounting_info(); | |
5219 }; | |
5220 | |
5221 bool G1CollectedHeap::regions_accounted_for() { | |
5222 // TODO: regions accounting for young/survivor/tenured | |
5223 return true; | |
5224 } | |
5225 | |
5226 bool G1CollectedHeap::print_region_accounting_info() { | |
5227 gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).", | |
5228 free_regions(), | |
5229 count_free_regions(), count_free_regions_list(), | |
5230 _free_region_list_size, _unclean_region_list.sz()); | |
5231 gclog_or_tty->print_cr("cur_alloc: %d.", | |
5232 (_cur_alloc_region == NULL ? 0 : 1)); | |
5233 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); | |
5234 | |
5235 // TODO: check regions accounting for young/survivor/tenured | |
5236 return true; | |
5237 } | |
5238 | |
5239 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5240 HeapRegion* hr = heap_region_containing(p); | |
5241 if (hr == NULL) { | |
5242 return is_in_permanent(p); | |
5243 } else { | |
5244 return hr->is_in(p); | |
5245 } | |
5246 } | |
941 | 5247 #endif // !PRODUCT |
342 | 5248 |
5249 void G1CollectedHeap::g1_unimplemented() { | |
5250 // Unimplemented(); | |
5251 } |