Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 1666:5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
Summary: During concurrent refinement, filter cards in young regions after it has been determined that the region has been allocated from and the young type of the region has been set.
Reviewed-by: iveresov, tonyp, jcoomes
author | johnc |
---|---|
date | Mon, 19 Jul 2010 11:06:34 -0700 |
parents | 4e5661ba9d98 |
children | 2d160770d2e5 |
rev | line source |
---|---|
342 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_g1CollectedHeap.cpp.incl" | |
27 | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
28 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
29 |
342 | 30 // turn it on so that the contents of the young list (scan-only / |
31 // to-be-collected) are printed at "strategic" points before / during | |
32 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
33 #define YOUNG_LIST_VERBOSE 0 |
342 | 34 // CURRENT STATUS |
35 // This file is under construction. Search for "FIXME". | |
36 | |
37 // INVARIANTS/NOTES | |
38 // | |
39 // All allocation activity covered by the G1CollectedHeap interface is | |
40 // serialized by acquiring the HeapLock. This happens in | |
41 // mem_allocate_work, which all such allocation functions call. | |
42 // (Note that this does not apply to TLAB allocation, which is not part | |
43 // of this interface: it is done by clients of this interface.) | |
44 | |
45 // Local to this file. | |
46 | |
47 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
48 SuspendibleThreadSet* _sts; | |
49 G1RemSet* _g1rs; | |
50 ConcurrentG1Refine* _cg1r; | |
51 bool _concurrent; | |
52 public: | |
53 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
54 G1RemSet* g1rs, | |
55 ConcurrentG1Refine* cg1r) : | |
56 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
57 {} | |
58 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
59 _g1rs->concurrentRefineOneCard(card_ptr, worker_i); | |
60 if (_concurrent && _sts->should_yield()) { | |
61 // Caller will actually yield. | |
62 return false; | |
63 } | |
64 // Otherwise, we finished successfully; return true. | |
65 return true; | |
66 } | |
67 void set_concurrent(bool b) { _concurrent = b; } | |
68 }; | |
69 | |
70 | |
71 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
72 int _calls; | |
73 G1CollectedHeap* _g1h; | |
74 CardTableModRefBS* _ctbs; | |
75 int _histo[256]; | |
76 public: | |
77 ClearLoggedCardTableEntryClosure() : | |
78 _calls(0) | |
79 { | |
80 _g1h = G1CollectedHeap::heap(); | |
81 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
82 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
83 } | |
84 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
85 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
86 _calls++; | |
87 unsigned char* ujb = (unsigned char*)card_ptr; | |
88 int ind = (int)(*ujb); | |
89 _histo[ind]++; | |
90 *card_ptr = -1; | |
91 } | |
92 return true; | |
93 } | |
94 int calls() { return _calls; } | |
95 void print_histo() { | |
96 gclog_or_tty->print_cr("Card table value histogram:"); | |
97 for (int i = 0; i < 256; i++) { | |
98 if (_histo[i] != 0) { | |
99 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
100 } | |
101 } | |
102 } | |
103 }; | |
104 | |
105 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
106 int _calls; | |
107 G1CollectedHeap* _g1h; | |
108 CardTableModRefBS* _ctbs; | |
109 public: | |
110 RedirtyLoggedCardTableEntryClosure() : | |
111 _calls(0) | |
112 { | |
113 _g1h = G1CollectedHeap::heap(); | |
114 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
115 } | |
116 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
117 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
118 _calls++; | |
119 *card_ptr = 0; | |
120 } | |
121 return true; | |
122 } | |
123 int calls() { return _calls; } | |
124 }; | |
125 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
126 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
127 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
128 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
129 *card_ptr = CardTableModRefBS::dirty_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
130 return true; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
131 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
132 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
133 |
342 | 134 YoungList::YoungList(G1CollectedHeap* g1h) |
135 : _g1h(g1h), _head(NULL), | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
136 _length(0), |
342 | 137 _last_sampled_rs_lengths(0), |
545 | 138 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
342 | 139 { |
140 guarantee( check_list_empty(false), "just making sure..." ); | |
141 } | |
142 | |
143 void YoungList::push_region(HeapRegion *hr) { | |
144 assert(!hr->is_young(), "should not already be young"); | |
145 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
146 | |
147 hr->set_next_young_region(_head); | |
148 _head = hr; | |
149 | |
150 hr->set_young(); | |
151 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
152 ++_length; | |
153 } | |
154 | |
155 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 156 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 157 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
158 | |
159 hr->set_next_young_region(_survivor_head); | |
160 if (_survivor_head == NULL) { | |
545 | 161 _survivor_tail = hr; |
342 | 162 } |
163 _survivor_head = hr; | |
164 | |
165 ++_survivor_length; | |
166 } | |
167 | |
168 void YoungList::empty_list(HeapRegion* list) { | |
169 while (list != NULL) { | |
170 HeapRegion* next = list->get_next_young_region(); | |
171 list->set_next_young_region(NULL); | |
172 list->uninstall_surv_rate_group(); | |
173 list->set_not_young(); | |
174 list = next; | |
175 } | |
176 } | |
177 | |
178 void YoungList::empty_list() { | |
179 assert(check_list_well_formed(), "young list should be well formed"); | |
180 | |
181 empty_list(_head); | |
182 _head = NULL; | |
183 _length = 0; | |
184 | |
185 empty_list(_survivor_head); | |
186 _survivor_head = NULL; | |
545 | 187 _survivor_tail = NULL; |
342 | 188 _survivor_length = 0; |
189 | |
190 _last_sampled_rs_lengths = 0; | |
191 | |
192 assert(check_list_empty(false), "just making sure..."); | |
193 } | |
194 | |
195 bool YoungList::check_list_well_formed() { | |
196 bool ret = true; | |
197 | |
198 size_t length = 0; | |
199 HeapRegion* curr = _head; | |
200 HeapRegion* last = NULL; | |
201 while (curr != NULL) { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
202 if (!curr->is_young()) { |
342 | 203 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
204 "incorrectly tagged (y: %d, surv: %d)", |
342 | 205 curr->bottom(), curr->end(), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
206 curr->is_young(), curr->is_survivor()); |
342 | 207 ret = false; |
208 } | |
209 ++length; | |
210 last = curr; | |
211 curr = curr->get_next_young_region(); | |
212 } | |
213 ret = ret && (length == _length); | |
214 | |
215 if (!ret) { | |
216 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
217 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
218 length, _length); | |
219 } | |
220 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
221 return ret; |
342 | 222 } |
223 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
224 bool YoungList::check_list_empty(bool check_sample) { |
342 | 225 bool ret = true; |
226 | |
227 if (_length != 0) { | |
228 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
229 _length); | |
230 ret = false; | |
231 } | |
232 if (check_sample && _last_sampled_rs_lengths != 0) { | |
233 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
234 ret = false; | |
235 } | |
236 if (_head != NULL) { | |
237 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
238 ret = false; | |
239 } | |
240 if (!ret) { | |
241 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
242 } | |
243 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
244 return ret; |
342 | 245 } |
246 | |
247 void | |
248 YoungList::rs_length_sampling_init() { | |
249 _sampled_rs_lengths = 0; | |
250 _curr = _head; | |
251 } | |
252 | |
253 bool | |
254 YoungList::rs_length_sampling_more() { | |
255 return _curr != NULL; | |
256 } | |
257 | |
258 void | |
259 YoungList::rs_length_sampling_next() { | |
260 assert( _curr != NULL, "invariant" ); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
261 size_t rs_length = _curr->rem_set()->occupied(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
262 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
263 _sampled_rs_lengths += rs_length; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
264 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
265 // The current region may not yet have been added to the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
266 // incremental collection set (it gets added when it is |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
267 // retired as the current allocation region). |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
268 if (_curr->in_collection_set()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
269 // Update the collection set policy information for this region |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
270 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
271 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
272 |
342 | 273 _curr = _curr->get_next_young_region(); |
274 if (_curr == NULL) { | |
275 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
276 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
277 } | |
278 } | |
279 | |
280 void | |
281 YoungList::reset_auxilary_lists() { | |
282 guarantee( is_empty(), "young list should be empty" ); | |
283 assert(check_list_well_formed(), "young list should be well formed"); | |
284 | |
285 // Add survivor regions to SurvRateGroup. | |
286 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
545 | 287 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
288 |
342 | 289 for (HeapRegion* curr = _survivor_head; |
290 curr != NULL; | |
291 curr = curr->get_next_young_region()) { | |
292 _g1h->g1_policy()->set_region_survivors(curr); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
293 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
294 // The region is a non-empty survivor so let's add it to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
295 // the incremental collection set for the next evacuation |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
296 // pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
297 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); |
342 | 298 } |
299 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
300 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
301 _head = _survivor_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
302 _length = _survivor_length; |
342 | 303 if (_survivor_head != NULL) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
304 assert(_survivor_tail != NULL, "cause it shouldn't be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
305 assert(_survivor_length > 0, "invariant"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
306 _survivor_tail->set_next_young_region(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
307 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
308 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
309 // Don't clear the survivor list handles until the start of |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
310 // the next evacuation pause - we need it in order to re-tag |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
311 // the survivor regions from this evacuation pause as 'young' |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
312 // at the start of the next. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
313 |
545 | 314 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
342 | 315 |
316 assert(check_list_well_formed(), "young list should be well formed"); | |
317 } | |
318 | |
319 void YoungList::print() { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
320 HeapRegion* lists[] = {_head, _survivor_head}; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
321 const char* names[] = {"YOUNG", "SURVIVOR"}; |
342 | 322 |
323 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
324 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
325 HeapRegion *curr = lists[list]; | |
326 if (curr == NULL) | |
327 gclog_or_tty->print_cr(" empty"); | |
328 while (curr != NULL) { | |
329 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
330 "age: %4d, y: %d, surv: %d", |
342 | 331 curr->bottom(), curr->end(), |
332 curr->top(), | |
333 curr->prev_top_at_mark_start(), | |
334 curr->next_top_at_mark_start(), | |
335 curr->top_at_conc_mark_count(), | |
336 curr->age_in_surv_rate_group_cond(), | |
337 curr->is_young(), | |
338 curr->is_survivor()); | |
339 curr = curr->get_next_young_region(); | |
340 } | |
341 } | |
342 | |
343 gclog_or_tty->print_cr(""); | |
344 } | |
345 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
346 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
347 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
348 // Claim the right to put the region on the dirty cards region list |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
349 // by installing a self pointer. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
350 HeapRegion* next = hr->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
351 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
352 HeapRegion* res = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
353 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
354 NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
355 if (res == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
356 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
357 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
358 // Put the region to the dirty cards region list. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
359 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
360 next = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
361 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
362 if (next == head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
363 assert(hr->get_next_dirty_cards_region() == hr, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
364 "hr->get_next_dirty_cards_region() != hr"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
365 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
366 // The last region in the list points to itself. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
367 hr->set_next_dirty_cards_region(hr); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
368 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
369 hr->set_next_dirty_cards_region(next); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
370 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
371 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
372 } while (next != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
373 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
374 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
375 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
376 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
377 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
378 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
379 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
380 HeapRegion* hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
381 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
382 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
383 if (head == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
384 return NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
385 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
386 HeapRegion* new_head = head->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
387 if (head == new_head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
388 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
389 new_head = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
390 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
391 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
392 head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
393 } while (hr != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
394 assert(hr != NULL, "invariant"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
395 hr->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
396 return hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
397 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
398 |
342 | 399 void G1CollectedHeap::stop_conc_gc_threads() { |
794 | 400 _cg1r->stop(); |
342 | 401 _czft->stop(); |
402 _cmThread->stop(); | |
403 } | |
404 | |
405 | |
406 void G1CollectedHeap::check_ct_logs_at_safepoint() { | |
407 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
408 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
409 | |
410 // Count the dirty cards at the start. | |
411 CountNonCleanMemRegionClosure count1(this); | |
412 ct_bs->mod_card_iterate(&count1); | |
413 int orig_count = count1.n(); | |
414 | |
415 // First clear the logged cards. | |
416 ClearLoggedCardTableEntryClosure clear; | |
417 dcqs.set_closure(&clear); | |
418 dcqs.apply_closure_to_all_completed_buffers(); | |
419 dcqs.iterate_closure_all_threads(false); | |
420 clear.print_histo(); | |
421 | |
422 // Now ensure that there's no dirty cards. | |
423 CountNonCleanMemRegionClosure count2(this); | |
424 ct_bs->mod_card_iterate(&count2); | |
425 if (count2.n() != 0) { | |
426 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
427 count2.n(), orig_count); | |
428 } | |
429 guarantee(count2.n() == 0, "Card table should be clean."); | |
430 | |
431 RedirtyLoggedCardTableEntryClosure redirty; | |
432 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
433 dcqs.apply_closure_to_all_completed_buffers(); | |
434 dcqs.iterate_closure_all_threads(false); | |
435 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
436 clear.calls(), orig_count); | |
437 guarantee(redirty.calls() == clear.calls(), | |
438 "Or else mechanism is broken."); | |
439 | |
440 CountNonCleanMemRegionClosure count3(this); | |
441 ct_bs->mod_card_iterate(&count3); | |
442 if (count3.n() != orig_count) { | |
443 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
444 orig_count, count3.n()); | |
445 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
446 } | |
447 | |
448 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
449 } | |
450 | |
451 // Private class members. | |
452 | |
453 G1CollectedHeap* G1CollectedHeap::_g1h; | |
454 | |
455 // Private methods. | |
456 | |
457 // Finds a HeapRegion that can be used to allocate a given size of block. | |
458 | |
459 | |
460 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, | |
461 bool do_expand, | |
462 bool zero_filled) { | |
463 ConcurrentZFThread::note_region_alloc(); | |
464 HeapRegion* res = alloc_free_region_from_lists(zero_filled); | |
465 if (res == NULL && do_expand) { | |
466 expand(word_size * HeapWordSize); | |
467 res = alloc_free_region_from_lists(zero_filled); | |
468 assert(res == NULL || | |
469 (!res->isHumongous() && | |
470 (!zero_filled || | |
471 res->zero_fill_state() == HeapRegion::Allocated)), | |
472 "Alloc Regions must be zero filled (and non-H)"); | |
473 } | |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
474 if (res != NULL) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
475 if (res->is_empty()) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
476 _free_regions--; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
477 } |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
478 assert(!res->isHumongous() && |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
479 (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated), |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
480 err_msg("Non-young alloc Regions must be zero filled (and non-H):" |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
481 " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d", |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
482 res->isHumongous(), zero_filled, res->zero_fill_state())); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
483 assert(!res->is_on_unclean_list(), |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
484 "Alloc Regions must not be on the unclean list"); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
485 if (G1PrintHeapRegions) { |
342 | 486 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " |
487 "top "PTR_FORMAT, | |
488 res->hrs_index(), res->bottom(), res->end(), res->top()); | |
489 } | |
490 } | |
491 return res; | |
492 } | |
493 | |
494 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, | |
495 size_t word_size, | |
496 bool zero_filled) { | |
497 HeapRegion* alloc_region = NULL; | |
498 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
499 alloc_region = newAllocRegion_work(word_size, true, zero_filled); | |
500 if (purpose == GCAllocForSurvived && alloc_region != NULL) { | |
545 | 501 alloc_region->set_survivor(); |
342 | 502 } |
503 ++_gc_alloc_region_counts[purpose]; | |
504 } else { | |
505 g1_policy()->note_alloc_region_limit_reached(purpose); | |
506 } | |
507 return alloc_region; | |
508 } | |
509 | |
510 // If could fit into free regions w/o expansion, try. | |
511 // Otherwise, if can expand, do so. | |
512 // Otherwise, if using ex regions might help, try with ex given back. | |
513 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { | |
514 assert(regions_accounted_for(), "Region leakage!"); | |
515 | |
516 // We can't allocate H regions while cleanupComplete is running, since | |
517 // some of the regions we find to be empty might not yet be added to the | |
518 // unclean list. (If we're already at a safepoint, this call is | |
519 // unnecessary, not to mention wrong.) | |
520 if (!SafepointSynchronize::is_at_safepoint()) | |
521 wait_for_cleanup_complete(); | |
522 | |
523 size_t num_regions = | |
524 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; | |
525 | |
526 // Special case if < one region??? | |
527 | |
528 // Remember the ft size. | |
529 size_t x_size = expansion_regions(); | |
530 | |
531 HeapWord* res = NULL; | |
532 bool eliminated_allocated_from_lists = false; | |
533 | |
534 // Can the allocation potentially fit in the free regions? | |
535 if (free_regions() >= num_regions) { | |
536 res = _hrs->obj_allocate(word_size); | |
537 } | |
538 if (res == NULL) { | |
539 // Try expansion. | |
540 size_t fs = _hrs->free_suffix(); | |
541 if (fs + x_size >= num_regions) { | |
542 expand((num_regions - fs) * HeapRegion::GrainBytes); | |
543 res = _hrs->obj_allocate(word_size); | |
544 assert(res != NULL, "This should have worked."); | |
545 } else { | |
546 // Expansion won't help. Are there enough free regions if we get rid | |
547 // of reservations? | |
548 size_t avail = free_regions(); | |
549 if (avail >= num_regions) { | |
550 res = _hrs->obj_allocate(word_size); | |
551 if (res != NULL) { | |
552 remove_allocated_regions_from_lists(); | |
553 eliminated_allocated_from_lists = true; | |
554 } | |
555 } | |
556 } | |
557 } | |
558 if (res != NULL) { | |
559 // Increment by the number of regions allocated. | |
560 // FIXME: Assumes regions all of size GrainBytes. | |
561 #ifndef PRODUCT | |
562 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * | |
563 HeapRegion::GrainWords)); | |
564 #endif | |
565 if (!eliminated_allocated_from_lists) | |
566 remove_allocated_regions_from_lists(); | |
567 _summary_bytes_used += word_size * HeapWordSize; | |
568 _free_regions -= num_regions; | |
569 _num_humongous_regions += (int) num_regions; | |
570 } | |
571 assert(regions_accounted_for(), "Region Leakage"); | |
572 return res; | |
573 } | |
574 | |
575 HeapWord* | |
576 G1CollectedHeap::attempt_allocation_slow(size_t word_size, | |
577 bool permit_collection_pause) { | |
578 HeapWord* res = NULL; | |
579 HeapRegion* allocated_young_region = NULL; | |
580 | |
581 assert( SafepointSynchronize::is_at_safepoint() || | |
582 Heap_lock->owned_by_self(), "pre condition of the call" ); | |
583 | |
584 if (isHumongous(word_size)) { | |
585 // Allocation of a humongous object can, in a sense, complete a | |
586 // partial region, if the previous alloc was also humongous, and | |
587 // caused the test below to succeed. | |
588 if (permit_collection_pause) | |
589 do_collection_pause_if_appropriate(word_size); | |
590 res = humongousObjAllocate(word_size); | |
591 assert(_cur_alloc_region == NULL | |
592 || !_cur_alloc_region->isHumongous(), | |
593 "Prevent a regression of this bug."); | |
594 | |
595 } else { | |
354
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
596 // We may have concurrent cleanup working at the time. Wait for it |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
597 // to complete. In the future we would probably want to make the |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
598 // concurrent cleanup truly concurrent by decoupling it from the |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
599 // allocation. |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
600 if (!SafepointSynchronize::is_at_safepoint()) |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
601 wait_for_cleanup_complete(); |
342 | 602 // If we do a collection pause, this will be reset to a non-NULL |
603 // value. If we don't, nulling here ensures that we allocate a new | |
604 // region below. | |
605 if (_cur_alloc_region != NULL) { | |
606 // We're finished with the _cur_alloc_region. | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
607 // As we're builing (at least the young portion) of the collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
608 // set incrementally we'll add the current allocation region to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
609 // the collection set here. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
610 if (_cur_alloc_region->is_young()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
611 g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
612 } |
342 | 613 _summary_bytes_used += _cur_alloc_region->used(); |
614 _cur_alloc_region = NULL; | |
615 } | |
616 assert(_cur_alloc_region == NULL, "Invariant."); | |
617 // Completion of a heap region is perhaps a good point at which to do | |
618 // a collection pause. | |
619 if (permit_collection_pause) | |
620 do_collection_pause_if_appropriate(word_size); | |
621 // Make sure we have an allocation region available. | |
622 if (_cur_alloc_region == NULL) { | |
623 if (!SafepointSynchronize::is_at_safepoint()) | |
624 wait_for_cleanup_complete(); | |
625 bool next_is_young = should_set_young_locked(); | |
626 // If the next region is not young, make sure it's zero-filled. | |
627 _cur_alloc_region = newAllocRegion(word_size, !next_is_young); | |
628 if (_cur_alloc_region != NULL) { | |
629 _summary_bytes_used -= _cur_alloc_region->used(); | |
630 if (next_is_young) { | |
631 set_region_short_lived_locked(_cur_alloc_region); | |
632 allocated_young_region = _cur_alloc_region; | |
633 } | |
634 } | |
635 } | |
636 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), | |
637 "Prevent a regression of this bug."); | |
638 | |
639 // Now retry the allocation. | |
640 if (_cur_alloc_region != NULL) { | |
1666
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
641 if (allocated_young_region != NULL) { |
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
642 // We need to ensure that the store to top does not |
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
643 // float above the setting of the young type. |
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
644 OrderAccess::storestore(); |
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
645 } |
342 | 646 res = _cur_alloc_region->allocate(word_size); |
647 } | |
648 } | |
649 | |
650 // NOTE: fails frequently in PRT | |
651 assert(regions_accounted_for(), "Region leakage!"); | |
652 | |
653 if (res != NULL) { | |
654 if (!SafepointSynchronize::is_at_safepoint()) { | |
655 assert( permit_collection_pause, "invariant" ); | |
656 assert( Heap_lock->owned_by_self(), "invariant" ); | |
657 Heap_lock->unlock(); | |
658 } | |
659 | |
660 if (allocated_young_region != NULL) { | |
661 HeapRegion* hr = allocated_young_region; | |
662 HeapWord* bottom = hr->bottom(); | |
663 HeapWord* end = hr->end(); | |
664 MemRegion mr(bottom, end); | |
665 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); | |
666 } | |
667 } | |
668 | |
669 assert( SafepointSynchronize::is_at_safepoint() || | |
670 (res == NULL && Heap_lock->owned_by_self()) || | |
671 (res != NULL && !Heap_lock->owned_by_self()), | |
672 "post condition of the call" ); | |
673 | |
674 return res; | |
675 } | |
676 | |
677 HeapWord* | |
678 G1CollectedHeap::mem_allocate(size_t word_size, | |
679 bool is_noref, | |
680 bool is_tlab, | |
681 bool* gc_overhead_limit_was_exceeded) { | |
682 debug_only(check_for_valid_allocation_state()); | |
683 assert(no_gc_in_progress(), "Allocation during gc not allowed"); | |
684 HeapWord* result = NULL; | |
685 | |
686 // Loop until the allocation is satisified, | |
687 // or unsatisfied after GC. | |
688 for (int try_count = 1; /* return or throw */; try_count += 1) { | |
689 int gc_count_before; | |
690 { | |
691 Heap_lock->lock(); | |
692 result = attempt_allocation(word_size); | |
693 if (result != NULL) { | |
694 // attempt_allocation should have unlocked the heap lock | |
695 assert(is_in(result), "result not in heap"); | |
696 return result; | |
697 } | |
698 // Read the gc count while the heap lock is held. | |
699 gc_count_before = SharedHeap::heap()->total_collections(); | |
700 Heap_lock->unlock(); | |
701 } | |
702 | |
703 // Create the garbage collection operation... | |
704 VM_G1CollectForAllocation op(word_size, | |
705 gc_count_before); | |
706 | |
707 // ...and get the VM thread to execute it. | |
708 VMThread::execute(&op); | |
709 if (op.prologue_succeeded()) { | |
710 result = op.result(); | |
711 assert(result == NULL || is_in(result), "result not in heap"); | |
712 return result; | |
713 } | |
714 | |
715 // Give a warning if we seem to be looping forever. | |
716 if ((QueuedAllocationWarningCount > 0) && | |
717 (try_count % QueuedAllocationWarningCount == 0)) { | |
718 warning("G1CollectedHeap::mem_allocate_work retries %d times", | |
719 try_count); | |
720 } | |
721 } | |
722 } | |
723 | |
724 void G1CollectedHeap::abandon_cur_alloc_region() { | |
725 if (_cur_alloc_region != NULL) { | |
726 // We're finished with the _cur_alloc_region. | |
727 if (_cur_alloc_region->is_empty()) { | |
728 _free_regions++; | |
729 free_region(_cur_alloc_region); | |
730 } else { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
731 // As we're builing (at least the young portion) of the collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
732 // set incrementally we'll add the current allocation region to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
733 // the collection set here. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
734 if (_cur_alloc_region->is_young()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
735 g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
736 } |
342 | 737 _summary_bytes_used += _cur_alloc_region->used(); |
738 } | |
739 _cur_alloc_region = NULL; | |
740 } | |
741 } | |
742 | |
636 | 743 void G1CollectedHeap::abandon_gc_alloc_regions() { |
744 // first, make sure that the GC alloc region list is empty (it should!) | |
745 assert(_gc_alloc_region_list == NULL, "invariant"); | |
746 release_gc_alloc_regions(true /* totally */); | |
747 } | |
748 | |
342 | 749 class PostMCRemSetClearClosure: public HeapRegionClosure { |
750 ModRefBarrierSet* _mr_bs; | |
751 public: | |
752 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
753 bool doHeapRegion(HeapRegion* r) { | |
754 r->reset_gc_time_stamp(); | |
755 if (r->continuesHumongous()) | |
756 return false; | |
757 HeapRegionRemSet* hrrs = r->rem_set(); | |
758 if (hrrs != NULL) hrrs->clear(); | |
759 // You might think here that we could clear just the cards | |
760 // corresponding to the used region. But no: if we leave a dirty card | |
761 // in a region we might allocate into, then it would prevent that card | |
762 // from being enqueued, and cause it to be missed. | |
763 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
764 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
765 return false; | |
766 } | |
767 }; | |
768 | |
769 | |
770 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
771 ModRefBarrierSet* _mr_bs; | |
772 public: | |
773 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
774 bool doHeapRegion(HeapRegion* r) { | |
775 if (r->continuesHumongous()) return false; | |
776 if (r->used_region().word_size() != 0) { | |
777 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
778 } | |
779 return false; | |
780 } | |
781 }; | |
782 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
783 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
784 G1CollectedHeap* _g1h; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
785 UpdateRSOopClosure _cl; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
786 int _worker_i; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
787 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
788 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
789 _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
790 _worker_i(worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
791 _g1h(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
792 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
793 bool doHeapRegion(HeapRegion* r) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
794 if (!r->continuesHumongous()) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
795 _cl.set_from(r); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
796 r->oop_iterate(&_cl); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
797 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
798 return false; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
799 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
800 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
801 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
802 class ParRebuildRSTask: public AbstractGangTask { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
803 G1CollectedHeap* _g1; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
804 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
805 ParRebuildRSTask(G1CollectedHeap* g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
806 : AbstractGangTask("ParRebuildRSTask"), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
807 _g1(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
808 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
809 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
810 void work(int i) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
811 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
812 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
813 HeapRegion::RebuildRSClaimValue); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
814 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
815 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
816 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
817 void G1CollectedHeap::do_collection(bool explicit_gc, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
818 bool clear_all_soft_refs, |
342 | 819 size_t word_size) { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
820 if (GC_locker::check_active_before_gc()) { |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
821 return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
822 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
823 |
342 | 824 ResourceMark rm; |
825 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
826 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
827 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
828 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
829 |
342 | 830 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
831 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
832 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
833 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
834 collector_policy()->should_clear_all_soft_refs(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
835 |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
836 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
837 |
342 | 838 { |
839 IsGCActiveMark x; | |
840 | |
841 // Timing | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
842 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
843 assert(!system_gc || explicit_gc, "invariant"); |
342 | 844 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
845 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
846 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
847 PrintGC, true, gclog_or_tty); |
342 | 848 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
849 TraceMemoryManagerStats tms(true /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
850 |
342 | 851 double start = os::elapsedTime(); |
852 g1_policy()->record_full_collection_start(); | |
853 | |
854 gc_prologue(true); | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
855 increment_total_collections(true /* full gc */); |
342 | 856 |
857 size_t g1h_prev_used = used(); | |
858 assert(used() == recalculate_used(), "Should be equal"); | |
859 | |
860 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
861 HandleMark hm; // Discard invalid handles created during verification | |
862 prepare_for_verify(); | |
863 gclog_or_tty->print(" VerifyBeforeGC:"); | |
864 Universe::verify(true); | |
865 } | |
866 assert(regions_accounted_for(), "Region leakage!"); | |
867 | |
868 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
869 | |
870 // We want to discover references, but not process them yet. | |
871 // This mode is disabled in | |
872 // instanceRefKlass::process_discovered_references if the | |
873 // generation does some collection work, or | |
874 // instanceRefKlass::enqueue_discovered_references if the | |
875 // generation returns without doing any work. | |
876 ref_processor()->disable_discovery(); | |
877 ref_processor()->abandon_partial_discovery(); | |
878 ref_processor()->verify_no_references_recorded(); | |
879 | |
880 // Abandon current iterations of concurrent marking and concurrent | |
881 // refinement, if any are in progress. | |
882 concurrent_mark()->abort(); | |
883 | |
884 // Make sure we'll choose a new allocation region afterwards. | |
885 abandon_cur_alloc_region(); | |
636 | 886 abandon_gc_alloc_regions(); |
342 | 887 assert(_cur_alloc_region == NULL, "Invariant."); |
888 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); | |
889 tear_down_region_lists(); | |
890 set_used_regions_to_need_zero_fill(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
891 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
892 // We may have added regions to the current incremental collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
893 // set between the last GC or pause and now. We need to clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
894 // incremental collection set and then start rebuilding it afresh |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
895 // after this full GC. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
896 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
897 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
898 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
899 |
342 | 900 if (g1_policy()->in_young_gc_mode()) { |
901 empty_young_list(); | |
902 g1_policy()->set_full_young_gcs(true); | |
903 } | |
904 | |
905 // Temporarily make reference _discovery_ single threaded (non-MT). | |
906 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); | |
907 | |
908 // Temporarily make refs discovery atomic | |
909 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
910 | |
911 // Temporarily clear _is_alive_non_header | |
912 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
913 | |
914 ref_processor()->enable_discovery(); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
915 ref_processor()->setup_policy(do_clear_all_soft_refs); |
342 | 916 |
917 // Do collection work | |
918 { | |
919 HandleMark hm; // Discard invalid handles created during gc | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
920 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); |
342 | 921 } |
922 // Because freeing humongous regions may have added some unclean | |
923 // regions, it is necessary to tear down again before rebuilding. | |
924 tear_down_region_lists(); | |
925 rebuild_region_lists(); | |
926 | |
927 _summary_bytes_used = recalculate_used(); | |
928 | |
929 ref_processor()->enqueue_discovered_references(); | |
930 | |
931 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
932 | |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
933 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
934 |
342 | 935 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
936 HandleMark hm; // Discard invalid handles created during verification | |
937 gclog_or_tty->print(" VerifyAfterGC:"); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
938 prepare_for_verify(); |
342 | 939 Universe::verify(false); |
940 } | |
941 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
942 | |
943 reset_gc_time_stamp(); | |
944 // Since everything potentially moved, we will clear all remembered | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
945 // sets, and clear all cards. Later we will rebuild remebered |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
946 // sets. We will also reset the GC time stamps of the regions. |
342 | 947 PostMCRemSetClearClosure rs_clear(mr_bs()); |
948 heap_region_iterate(&rs_clear); | |
949 | |
950 // Resize the heap if necessary. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
951 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); |
342 | 952 |
953 if (_cg1r->use_cache()) { | |
954 _cg1r->clear_and_record_card_counts(); | |
955 _cg1r->clear_hot_cache(); | |
956 } | |
957 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
958 // Rebuild remembered sets of all regions. |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
959 if (ParallelGCThreads > 0) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
960 ParRebuildRSTask rebuild_rs_task(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
961 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
962 HeapRegion::InitialClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
963 set_par_threads(workers()->total_workers()); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
964 workers()->run_task(&rebuild_rs_task); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
965 set_par_threads(0); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
966 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
967 HeapRegion::RebuildRSClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
968 reset_heap_region_claim_values(); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
969 } else { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
970 RebuildRSOutOfRegionClosure rebuild_rs(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
971 heap_region_iterate(&rebuild_rs); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
972 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
973 |
342 | 974 if (PrintGC) { |
975 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
976 } | |
977 | |
978 if (true) { // FIXME | |
979 // Ask the permanent generation to adjust size for full collections | |
980 perm()->compute_new_size(); | |
981 } | |
982 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
983 // Start a new incremental collection set for the next pause |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
984 assert(g1_policy()->collection_set() == NULL, "must be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
985 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
986 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
987 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
988 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
989 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
990 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
991 |
342 | 992 double end = os::elapsedTime(); |
993 g1_policy()->record_full_collection_end(); | |
994 | |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
995 #ifdef TRACESPINNING |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
996 ParallelTaskTerminator::print_termination_counts(); |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
997 #endif |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
998 |
342 | 999 gc_epilogue(true); |
1000 | |
794 | 1001 // Discard all rset updates |
1002 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1003 assert(!G1DeferredRSUpdate |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1004 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
342 | 1005 assert(regions_accounted_for(), "Region leakage!"); |
1006 } | |
1007 | |
1008 if (g1_policy()->in_young_gc_mode()) { | |
1009 _young_list->reset_sampled_info(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1010 // At this point there should be no regions in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1011 // entire heap tagged as young. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1012 assert( check_young_list_empty(true /* check_heap */), |
342 | 1013 "young list should be empty at this point"); |
1014 } | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1015 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1016 // Update the number of full collections that have been completed. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1017 increment_full_collections_completed(false /* outer */); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1018 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1019 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1020 Universe::print_heap_after_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1021 } |
342 | 1022 } |
1023 | |
// Entry point for an externally requested full collection (e.g. System.gc()
// or a failed allocation path that demands a full GC). Forwards to
// do_collection() marked as explicit, with no pending allocation request
// (word_size == 0).
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_collection(true, /* explicit_gc */
                clear_all_soft_refs,
                0 /* word_size */);
}
1029 | |
// This code is mostly copied from TenuredGeneration.
//
// After a full collection, adjust the committed heap size so that the
// free ratio lies between MinHeapFreeRatio and MaxHeapFreeRatio:
// expand if capacity is below the minimum desired capacity, shrink if
// it is above the maximum desired capacity, otherwise leave it alone.
// Both bounds are clamped so the heap never shrinks below its initial size.
// NOTE(review): the word_size parameter is not referenced in this body --
// "used" is taken from used() only; confirm whether a pending allocation
// should be folded into used_after_gc as the comment below suggests.
void
G1CollectedHeap::
resize_if_necessary_after_full_collection(size_t word_size) {
  assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");

  // Include the current allocation, if any, and bytes that will be
  // pre-allocated to support collections, as "used".
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();
  const size_t free_after_gc = capacity_after_gc - used_after_gc;

  // We don't have floating point command-line arguments
  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
  const double minimum_used_percentage = 1.0 - maximum_free_percentage;

  // Capacity targets derived from the used bytes and the allowed
  // used-percentage window.
  size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage);
  size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage);

  // Don't shrink less than the initial size.
  minimum_desired_capacity =
    MAX2(minimum_desired_capacity,
         collector_policy()->initial_heap_byte_size());
  maximum_desired_capacity =
    MAX2(maximum_desired_capacity,
         collector_policy()->initial_heap_byte_size());

  // Sanity: minimum_desired_capacity = used / maximum_used_percentage
  // with maximum_used_percentage <= 1.0, so it can never fall below
  // used_after_gc.  (The original comment here was left unfinished.)
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
  assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const double free_percentage = ((double)free_after_gc) / capacity();
    gclog_or_tty->print_cr("Computing new size after full GC ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f",
                           minimum_free_percentage);
    gclog_or_tty->print_cr("  "
                           "  maximum_free_percentage: %6.2f",
                           maximum_free_percentage);
    gclog_or_tty->print_cr("  "
                           "  capacity: %6.1fK"
                           "  minimum_desired_capacity: %6.1fK"
                           "  maximum_desired_capacity: %6.1fK",
                           capacity() / (double) K,
                           minimum_desired_capacity / (double) K,
                           maximum_desired_capacity / (double) K);
    gclog_or_tty->print_cr("  "
                           "  free_after_gc   : %6.1fK"
                           "  used_after_gc   : %6.1fK",
                           free_after_gc / (double) K,
                           used_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
                           "  free_percentage: %6.2f",
                           free_percentage);
  }
  if (capacity() < minimum_desired_capacity) {
    // Don't expand unless it's significant
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    expand(expand_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
                             "  minimum_desired_capacity: %6.1fK"
                             "  expand_bytes: %6.1fK",
                             minimum_desired_capacity / (double) K,
                             expand_bytes / (double) K);
    }

    // No expansion, now see if we want to shrink
  } else if (capacity() > maximum_desired_capacity) {
    // Capacity too large, compute shrinking size
    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
    shrink(shrink_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  shrinking:"
                             "  initSize: %.1fK"
                             "  maximum_desired_capacity: %.1fK",
                             collector_policy()->initial_heap_byte_size() / (double) K,
                             maximum_desired_capacity / (double) K);
      gclog_or_tty->print_cr("  "
                             "  shrink_bytes: %.1fK",
                             shrink_bytes / (double) K);
    }
  }
}
1118 | |
1119 | |
// Last-ditch attempt to satisfy an allocation of word_size words after the
// normal allocation paths have failed.  The escalation order is:
//   1. expand the heap and retry the allocation;
//   2. full collection WITHOUT clearing soft references, then retry;
//   3. full collection WITH soft-reference clearing, then retry.
// Returns the allocated block, or NULL if all of the above fail (the
// caller is then expected to report out-of-memory).
HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  HeapWord* result = NULL;

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses.  Therefore, at least for now, we'll favor
  // expansion over collection.  (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)

  result = expand_and_allocate(word_size);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // OK, I guess we have to try collection.

  // Non-explicit full GC, soft references kept alive.
  do_collection(false, false, word_size);

  result = attempt_allocation(word_size, /*permit_collection_pause*/false);

  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // Try collecting soft references.
  do_collection(false, true, word_size);
  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  assert(!collector_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
1163 | |
// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size".  If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  // Grow by at least MinHeapDeltaBytes, even for a small request.
  size_t expand_bytes = word_size * HeapWordSize;
  if (expand_bytes < MinHeapDeltaBytes) {
    expand_bytes = MinHeapDeltaBytes;
  }
  expand(expand_bytes);
  assert(regions_accounted_for(), "Region leakage!");
  // Retry without permitting a collection pause: the caller decides
  // whether to fall back to a full GC (see satisfy_failed_allocation).
  HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
  return result;
}
1179 | |
// Serial convenience wrapper around free_region_if_totally_empty_work():
// frees hr if it is completely garbage, using local accumulators and a
// local unclean-region list, then publishes the results via
// finish_free_region_work().  Returns the number of bytes whose "used"
// accounting was released (0 if the region was not freed).
size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
  size_t pre_used = 0;
  size_t cleared_h_regions = 0;
  size_t freed_regions = 0;
  UncleanRegionList local_list;
  free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
                                    freed_regions, &local_list);

  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
                          &local_list);
  return pre_used;
}
1192 | |
1193 void | |
1194 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, | |
1195 size_t& pre_used, | |
1196 size_t& cleared_h, | |
1197 size_t& freed_regions, | |
1198 UncleanRegionList* list, | |
1199 bool par) { | |
1200 assert(!hr->continuesHumongous(), "should have filtered these out"); | |
1201 size_t res = 0; | |
677 | 1202 if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && |
1203 !hr->is_young()) { | |
1204 if (G1PolicyVerbose > 0) | |
1205 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" | |
1206 " during cleanup", hr, hr->used()); | |
1207 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); | |
342 | 1208 } |
1209 } | |
1210 | |
// FIXME: both this and shrink could probably be more efficient by
// doing one "VirtualSpace::expand_by" call rather than several.
//
// Commit additional storage and carve it into HeapRegions.  The request
// is rounded up to at least 1K, page-aligned, and then aligned to the
// region size; storage is then committed one region (GrainBytes) at a
// time.  Each new region is registered with the HeapRegionSeq, placed on
// the free or unclean list depending on whether its memory is already
// zeroed, and the card table and block-offset table are resized to cover
// the enlarged committed range.
void G1CollectedHeap::expand(size_t expand_bytes) {
  size_t old_mem_size = _g1_storage.committed_size();
  // We expand by a minimum of 1K.
  expand_bytes = MAX2(expand_bytes, (size_t)K);
  size_t aligned_expand_bytes =
    ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                       HeapRegion::GrainBytes);
  expand_bytes = aligned_expand_bytes;
  while (expand_bytes > 0) {
    HeapWord* base = (HeapWord*)_g1_storage.high();
    // Commit more storage.
    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
    if (!successful) {
      // Commit failed: give up on the remainder of the request.
      expand_bytes = 0;
    } else {
      expand_bytes -= HeapRegion::GrainBytes;
      // Expand the committed region.
      HeapWord* high = (HeapWord*) _g1_storage.high();
      _g1_committed.set_end(high);
      // Create a new HeapRegion.
      MemRegion mr(base, high);
      // Memory beyond the historical high-water mark has never been
      // committed before and is therefore known to be zeroed.
      bool is_zeroed = !_g1_max_committed.contains(base);
      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);

      // Now update max_committed if necessary.
      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));

      // Add it to the HeapRegionSeq.
      _hrs->insert(hr);
      // Set the zero-fill state, according to whether it's already
      // zeroed.  ZF_mon guards the zero-fill state and the free/unclean
      // lists.
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        if (is_zeroed) {
          hr->set_zero_fill_complete();
          put_free_region_on_list_locked(hr);
        } else {
          hr->set_zero_fill_needed();
          put_region_on_unclean_list_locked(hr);
        }
      }
      _free_regions++;
      // And we used up an expansion region to create it.
      _expansion_regions--;
      // Tell the cardtable about it.
      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
      // And the offset table as well.
      _bot_shared->resize(_g1_committed.word_size());
    }
  }
  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_expand_bytes/K,
                           new_mem_size/K);
  }
}
1271 | |
// Uncommit storage at the top of the heap.  The request is page- and
// region-aligned (rounded DOWN, so we never shrink more than asked);
// the HeapRegionSeq decides which trailing regions can actually be
// deleted and returns the memory range to release.  Region counters and
// the card/offset tables are then updated to match the new committed
// range.  Callers must have torn down the region lists first (see
// shrink()).
void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
{
  size_t old_mem_size = _g1_storage.committed_size();
  size_t aligned_shrink_bytes =
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                         HeapRegion::GrainBytes);
  size_t num_regions_deleted = 0;
  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);

  // The released range must sit exactly at the top of committed storage.
  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  if (mr.byte_size() > 0)
    _g1_storage.shrink_by(mr.byte_size());
  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");

  _g1_committed.set_end(mr.start());
  // Regions removed from the heap become available for future expansion.
  _free_regions -= num_regions_deleted;
  _expansion_regions += num_regions_deleted;

  // Tell the cardtable about it.
  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);

  // And the offset table as well.
  _bot_shared->resize(_g1_committed.word_size());

  HeapRegionRemSet::shrink_heap(n_regions());

  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_shrink_bytes/K,
                           new_mem_size/K);
  }
}
1306 | |
// Public shrink entry point: release the GC alloc regions and tear down
// the region lists (which reference regions that may be deleted), do the
// actual uncommit in shrink_helper(), then rebuild the lists from the
// surviving regions.
void G1CollectedHeap::shrink(size_t shrink_bytes) {
  release_gc_alloc_regions(true /* totally */);
  tear_down_region_lists();  // We will rebuild them in a moment.
  shrink_helper(shrink_bytes);
  rebuild_region_lists();
}
1313 | |
1314 // Public methods. | |
1315 | |
1316 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1317 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1318 #endif // _MSC_VER | |
1319 | |
1320 | |
// Constructor: establishes default/empty state for all fields and
// allocates the per-thread task-queue and remembered-set-iterator
// infrastructure.  Heap storage itself is NOT reserved or committed
// here -- that happens in initialize().
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  SharedHeap(policy_),
  _g1_policy(policy_),
  _dirty_card_queue_set(false),
  _ref_processor(NULL),
  _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  _bot_shared(NULL),
  _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
  _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  _evac_failure_scan_stack(NULL) ,
  _mark_in_progress(false),
  _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
  _cur_alloc_region(NULL),
  _refine_cte_cl(NULL),
  _free_region_list(NULL), _free_region_list_size(0),
  _free_regions(0),
  _full_collection(false),
  _unclean_region_list(),
  _unclean_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
  _surviving_young_words(NULL),
  _full_collections_completed(0),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _dirty_cards_region_list(NULL) {
  _g1h = this; // To catch bugs.
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }

  // An object of half a region or more is treated as humongous.
  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;

  // One scan queue (and one rem-set iterator) per GC worker thread;
  // at least one even when running single-threaded.
  int n_queues = MAX2((int)ParallelGCThreads, 1);
  _task_queues = new RefToScanQueueSet(n_queues);

  int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  HeapRegionRemSetIterator** iter_arr =
    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  for (int i = 0; i < n_queues; i++) {
    iter_arr[i] = new HeapRegionRemSetIterator();
  }
  _rem_set_iterator = iter_arr;

  for (int i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
    _task_queues->register_queue(i, q);
  }

  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    _gc_alloc_regions[ap] = NULL;
    _gc_alloc_region_counts[ap] = 0;
    _retained_gc_alloc_regions[ap] = NULL;
    // by default, we do not retain a GC alloc region for each ap;
    // we'll override this, when appropriate, below
    _retain_gc_alloc_region[ap] = false;
  }

  // We will try to remember the last half-full tenured region we
  // allocated to at the end of a collection so that we can re-use it
  // during the next collection.
  _retain_gc_alloc_region[GCAllocForTenured] = true;

  guarantee(_task_queues != NULL, "task_queues allocation failure.");
}
1389 | |
1390 jint G1CollectedHeap::initialize() { | |
1166 | 1391 CollectedHeap::pre_initialize(); |
342 | 1392 os::enable_vtime(); |
1393 | |
1394 // Necessary to satisfy locking discipline assertions. | |
1395 | |
1396 MutexLocker x(Heap_lock); | |
1397 | |
1398 // While there are no constraints in the GC code that HeapWordSize | |
1399 // be any particular value, there are multiple other areas in the | |
1400 // system which believe this to be true (e.g. oop->object_size in some | |
1401 // cases incorrectly returns the size in wordSize units rather than | |
1402 // HeapWordSize). | |
1403 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1404 | |
1405 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1406 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1407 | |
1408 // Ensure that the sizes are properly aligned. | |
1409 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1410 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1411 | |
1412 _cg1r = new ConcurrentG1Refine(); | |
1413 | |
1414 // Reserve the maximum. | |
1415 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1416 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1417 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1418 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1419 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1420 |
342 | 1421 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1422 HeapRegion::GrainBytes, | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1423 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1424 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1425 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1426 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1427 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1428 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1429 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1430 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1431 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1432 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1433 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1434 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1435 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1436 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1437 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1438 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1439 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1440 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1441 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1442 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1443 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1444 } |
342 | 1445 |
1446 if (!heap_rs.is_reserved()) { | |
1447 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1448 return JNI_ENOMEM; | |
1449 } | |
1450 | |
1451 // It is important to do this in a way such that concurrent readers can't | |
1452 // temporarily think somethings in the heap. (I've actually seen this | |
1453 // happen in asserts: DLD.) | |
1454 _reserved.set_word_size(0); | |
1455 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1456 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1457 | |
1458 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1459 | |
1460 _num_humongous_regions = 0; | |
1461 | |
1462 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1463 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1464 set_barrier_set(rem_set()->bs()); | |
1465 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1466 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1467 } else { | |
1468 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1469 return JNI_ENOMEM; | |
1470 } | |
1471 | |
1472 // Also create a G1 rem set. | |
1473 if (G1UseHRIntoRS) { | |
1474 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { | |
1475 _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
1476 } else { | |
1477 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); | |
1478 return JNI_ENOMEM; | |
1479 } | |
1480 } else { | |
1481 _g1_rem_set = new StupidG1RemSet(this); | |
1482 } | |
1483 | |
1484 // Carve out the G1 part of the heap. | |
1485 | |
1486 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1487 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1488 g1_rs.size()/HeapWordSize); | |
1489 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1490 | |
1491 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1492 | |
1493 _g1_storage.initialize(g1_rs, 0); | |
1494 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1495 _g1_max_committed = _g1_committed; | |
393 | 1496 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1497 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1498 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
1499 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1500 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1501 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1502 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1503 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1504 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1505 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1506 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1507 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1508 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1509 |
342 | 1510 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1511 heap_word_size(init_byte_size)); | |
1512 | |
1513 _g1h = this; | |
1514 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1515 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1516 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1517 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1518 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1519 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1520 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1521 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1522 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1523 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1524 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1525 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1526 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1527 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1528 |
342 | 1529 // Create the ConcurrentMark data structure and thread. |
1530 // (Must do this late, so that "max_regions" is defined.) | |
1531 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1532 _cmThread = _cm->cmThread(); | |
1533 | |
1534 // ...and the concurrent zero-fill thread, if necessary. | |
1535 if (G1ConcZeroFill) { | |
1536 _czft = new ConcurrentZFThread(); | |
1537 } | |
1538 | |
1539 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1540 HeapRegionRemSet::init_heap(max_regions()); | |
1541 | |
677 | 1542 // Now expand into the initial heap size. |
1543 expand(init_byte_size); | |
342 | 1544 |
1545 // Perform any initialization actions delegated to the policy. | |
1546 g1_policy()->init(); | |
1547 | |
1548 g1_policy()->note_start_of_mark_thread(); | |
1549 | |
1550 _refine_cte_cl = | |
1551 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1552 g1_rem_set(), | |
1553 concurrent_g1_refine()); | |
1554 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1555 | |
1556 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1557 SATB_Q_FL_lock, | |
1111 | 1558 G1SATBProcessCompletedThreshold, |
342 | 1559 Shared_SATB_Q_lock); |
794 | 1560 |
1561 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1562 DirtyCardQ_FL_lock, | |
1111 | 1563 concurrent_g1_refine()->yellow_zone(), |
1564 concurrent_g1_refine()->red_zone(), | |
794 | 1565 Shared_DirtyCardQ_lock); |
1566 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1567 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1568 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1569 DirtyCardQ_FL_lock, |
1111 | 1570 -1, // never trigger processing |
1571 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1572 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1573 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1574 } |
342 | 1575 // In case we're keeping closure specialization stats, initialize those |
1576 // counts and that mechanism. | |
1577 SpecializationStats::clear(); | |
1578 | |
1579 _gc_alloc_region_list = NULL; | |
1580 | |
1581 // Do later initialization work for concurrent refinement. | |
1582 _cg1r->init(); | |
1583 | |
1584 return JNI_OK; | |
1585 } | |
1586 | |
1587 void G1CollectedHeap::ref_processing_init() { | |
1588 SharedHeap::ref_processing_init(); | |
1589 MemRegion mr = reserved_region(); | |
1590 _ref_processor = ReferenceProcessor::create_ref_processor( | |
1591 mr, // span | |
1592 false, // Reference discovery is not atomic | |
1593 // (though it shouldn't matter here.) | |
1594 true, // mt_discovery | |
1595 NULL, // is alive closure: need to fill this in for efficiency | |
1596 ParallelGCThreads, | |
1597 ParallelRefProcEnabled, | |
1598 true); // Setting next fields of discovered | |
1599 // lists requires a barrier. | |
1600 } | |
1601 | |
1602 size_t G1CollectedHeap::capacity() const { | |
1603 return _g1_committed.byte_size(); | |
1604 } | |
1605 | |
1606 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent, | |
1607 int worker_i) { | |
889 | 1608 // Clean cards in the hot card cache |
1609 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set()); | |
1610 | |
342 | 1611 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
1612 int n_completed_buffers = 0; | |
1613 while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) { | |
1614 n_completed_buffers++; | |
1615 } | |
1616 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
1617 (double) n_completed_buffers); | |
1618 dcqs.clear_n_completed_buffers(); | |
1619 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
1620 } | |
1621 | |
1622 | |
1623 // Computes the sum of the storage used by the various regions. | |
1624 | |
1625 size_t G1CollectedHeap::used() const { | |
862
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
1626 assert(Heap_lock->owner() != NULL, |
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
1627 "Should be owned on this thread's behalf."); |
342 | 1628 size_t result = _summary_bytes_used; |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
1629 // Read only once in case it is set to NULL concurrently |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
1630 HeapRegion* hr = _cur_alloc_region; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
1631 if (hr != NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
1632 result += hr->used(); |
342 | 1633 return result; |
1634 } | |
1635 | |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
1636 size_t G1CollectedHeap::used_unlocked() const { |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
1637 size_t result = _summary_bytes_used; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
1638 return result; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
1639 } |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
1640 |
342 | 1641 class SumUsedClosure: public HeapRegionClosure { |
1642 size_t _used; | |
1643 public: | |
1644 SumUsedClosure() : _used(0) {} | |
1645 bool doHeapRegion(HeapRegion* r) { | |
1646 if (!r->continuesHumongous()) { | |
1647 _used += r->used(); | |
1648 } | |
1649 return false; | |
1650 } | |
1651 size_t result() { return _used; } | |
1652 }; | |
1653 | |
1654 size_t G1CollectedHeap::recalculate_used() const { | |
1655 SumUsedClosure blk; | |
1656 _hrs->iterate(&blk); | |
1657 return blk.result(); | |
1658 } | |
1659 | |
1660 #ifndef PRODUCT | |
1661 class SumUsedRegionsClosure: public HeapRegionClosure { | |
1662 size_t _num; | |
1663 public: | |
677 | 1664 SumUsedRegionsClosure() : _num(0) {} |
342 | 1665 bool doHeapRegion(HeapRegion* r) { |
1666 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
1667 _num += 1; | |
1668 } | |
1669 return false; | |
1670 } | |
1671 size_t result() { return _num; } | |
1672 }; | |
1673 | |
1674 size_t G1CollectedHeap::recalculate_used_regions() const { | |
1675 SumUsedRegionsClosure blk; | |
1676 _hrs->iterate(&blk); | |
1677 return blk.result(); | |
1678 } | |
1679 #endif // PRODUCT | |
1680 | |
1681 size_t G1CollectedHeap::unsafe_max_alloc() { | |
1682 if (_free_regions > 0) return HeapRegion::GrainBytes; | |
1683 // otherwise, is there space in the current allocation region? | |
1684 | |
1685 // We need to store the current allocation region in a local variable | |
1686 // here. The problem is that this method doesn't take any locks and | |
1687 // there may be other threads which overwrite the current allocation | |
1688 // region field. attempt_allocation(), for example, sets it to NULL | |
1689 // and this can happen *after* the NULL check here but before the call | |
1690 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
1691 // to be a problem in the optimized build, since the two loads of the | |
1692 // current allocation region field are optimized away. | |
1693 HeapRegion* car = _cur_alloc_region; | |
1694 | |
1695 // FIXME: should iterate over all regions? | |
1696 if (car == NULL) { | |
1697 return 0; | |
1698 } | |
1699 return car->free(); | |
1700 } | |
1701 | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1702 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1703 return |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1704 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1705 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1706 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1707 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1708 void G1CollectedHeap::increment_full_collections_completed(bool outer) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1709 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1710 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1711 // We have already incremented _total_full_collections at the start |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1712 // of the GC, so total_full_collections() represents how many full |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1713 // collections have been started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1714 unsigned int full_collections_started = total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1715 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1716 // Given that this method is called at the end of a Full GC or of a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1717 // concurrent cycle, and those can be nested (i.e., a Full GC can |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1718 // interrupt a concurrent cycle), the number of full collections |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1719 // completed should be either one (in the case where there was no |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1720 // nesting) or two (when a Full GC interrupted a concurrent cycle) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1721 // behind the number of full collections started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1722 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1723 // This is the case for the inner caller, i.e. a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1724 assert(outer || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1725 (full_collections_started == _full_collections_completed + 1) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1726 (full_collections_started == _full_collections_completed + 2), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1727 err_msg("for inner caller: full_collections_started = %u " |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1728 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1729 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1730 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1731 // This is the case for the outer caller, i.e. the concurrent cycle. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1732 assert(!outer || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1733 (full_collections_started == _full_collections_completed + 1), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1734 err_msg("for outer caller: full_collections_started = %u " |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1735 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1736 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1737 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1738 _full_collections_completed += 1; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1739 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1740 // This notify_all() will ensure that a thread that called |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1741 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1742 // and it's waiting for a full GC to finish will be woken up. It is |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1743 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1744 FullGCCount_lock->notify_all(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1745 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1746 |
342 | 1747 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
1748 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
1749 assert(Heap_lock->is_locked(), "Precondition#2"); | |
1750 GCCauseSetter gcs(this, cause); | |
1751 switch (cause) { | |
1752 case GCCause::_heap_inspection: | |
1753 case GCCause::_heap_dump: { | |
1754 HandleMark hm; | |
1755 do_full_collection(false); // don't clear all soft refs | |
1756 break; | |
1757 } | |
1758 default: // XXX FIX ME | |
1759 ShouldNotReachHere(); // Unexpected use of this function | |
1760 } | |
1761 } | |
1762 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1763 void G1CollectedHeap::collect(GCCause::Cause cause) { |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1764 // The caller doesn't have the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1765 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1766 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1767 unsigned int gc_count_before; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1768 unsigned int full_gc_count_before; |
342 | 1769 { |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1770 MutexLocker ml(Heap_lock); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1771 // Read the GC count while holding the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1772 gc_count_before = SharedHeap::heap()->total_collections(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1773 full_gc_count_before = SharedHeap::heap()->total_full_collections(); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1774 |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1775 // Don't want to do a GC until cleanup is completed. |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1776 wait_for_cleanup_complete(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1777 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1778 // We give up heap lock; VMThread::execute gets it back below |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1779 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1780 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1781 if (should_do_concurrent_full_gc(cause)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1782 // Schedule an initial-mark evacuation pause that will start a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1783 // concurrent cycle. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1784 VM_G1IncCollectionPause op(gc_count_before, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1785 true, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1786 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1787 cause); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1788 VMThread::execute(&op); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1789 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1790 if (cause == GCCause::_gc_locker |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1791 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1792 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1793 // Schedule a standard evacuation pause. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1794 VM_G1IncCollectionPause op(gc_count_before, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1795 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1796 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1797 cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1798 VMThread::execute(&op); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1799 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1800 // Schedule a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1801 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1802 VMThread::execute(&op); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
1803 } |
342 | 1804 } |
1805 } | |
1806 | |
1807 bool G1CollectedHeap::is_in(const void* p) const { | |
1808 if (_g1_committed.contains(p)) { | |
1809 HeapRegion* hr = _hrs->addr_to_region(p); | |
1810 return hr->is_in(p); | |
1811 } else { | |
1812 return _perm_gen->as_gen()->is_in(p); | |
1813 } | |
1814 } | |
1815 | |
1816 // Iteration functions. | |
1817 | |
1818 // Iterates an OopClosure over all ref-containing fields of objects | |
1819 // within a HeapRegion. | |
1820 | |
1821 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
1822 MemRegion _mr; | |
1823 OopClosure* _cl; | |
1824 public: | |
1825 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
1826 : _mr(mr), _cl(cl) {} | |
1827 bool doHeapRegion(HeapRegion* r) { | |
1828 if (! r->continuesHumongous()) { | |
1829 r->oop_iterate(_cl); | |
1830 } | |
1831 return false; | |
1832 } | |
1833 }; | |
1834 | |
678 | 1835 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 1836 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
1837 _hrs->iterate(&blk); | |
678 | 1838 if (do_perm) { |
1839 perm_gen()->oop_iterate(cl); | |
1840 } | |
342 | 1841 } |
1842 | |
678 | 1843 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 1844 IterateOopClosureRegionClosure blk(mr, cl); |
1845 _hrs->iterate(&blk); | |
678 | 1846 if (do_perm) { |
1847 perm_gen()->oop_iterate(cl); | |
1848 } | |
342 | 1849 } |
1850 | |
1851 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
1852 | |
1853 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
1854 ObjectClosure* _cl; | |
1855 public: | |
1856 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
1857 bool doHeapRegion(HeapRegion* r) { | |
1858 if (! r->continuesHumongous()) { | |
1859 r->object_iterate(_cl); | |
1860 } | |
1861 return false; | |
1862 } | |
1863 }; | |
1864 | |
678 | 1865 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 1866 IterateObjectClosureRegionClosure blk(cl); |
1867 _hrs->iterate(&blk); | |
678 | 1868 if (do_perm) { |
1869 perm_gen()->object_iterate(cl); | |
1870 } | |
342 | 1871 } |
1872 | |
1873 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
1874 // FIXME: is this right? | |
1875 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
1876 } | |
1877 | |
1878 // Calls a SpaceClosure on a HeapRegion. | |
1879 | |
1880 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
1881 SpaceClosure* _cl; | |
1882 public: | |
1883 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
1884 bool doHeapRegion(HeapRegion* r) { | |
1885 _cl->do_space(r); | |
1886 return false; | |
1887 } | |
1888 }; | |
1889 | |
1890 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
1891 SpaceClosureRegionClosure blk(cl); | |
1892 _hrs->iterate(&blk); | |
1893 } | |
1894 | |
1895 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { | |
1896 _hrs->iterate(cl); | |
1897 } | |
1898 | |
1899 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
1900 HeapRegionClosure* cl) { | |
1901 _hrs->iterate_from(r, cl); | |
1902 } | |
1903 | |
1904 void | |
1905 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { | |
1906 _hrs->iterate_from(idx, cl); | |
1907 } | |
1908 | |
1909 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } | |
1910 | |
1911 void | |
1912 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
1913 int worker, | |
1914 jint claim_value) { | |
355 | 1915 const size_t regions = n_regions(); |
1916 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); | |
1917 // try to spread out the starting points of the workers | |
1918 const size_t start_index = regions / worker_num * (size_t) worker; | |
1919 | |
1920 // each worker will actually look at all regions | |
1921 for (size_t count = 0; count < regions; ++count) { | |
1922 const size_t index = (start_index + count) % regions; | |
1923 assert(0 <= index && index < regions, "sanity"); | |
1924 HeapRegion* r = region_at(index); | |
1925 // we'll ignore "continues humongous" regions (we'll process them | |
1926 // when we come across their corresponding "start humongous" | |
1927 // region) and regions already claimed | |
1928 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
1929 continue; | |
1930 } | |
1931 // OK, try to claim it | |
342 | 1932 if (r->claimHeapRegion(claim_value)) { |
355 | 1933 // success! |
1934 assert(!r->continuesHumongous(), "sanity"); | |
1935 if (r->startsHumongous()) { | |
1936 // If the region is "starts humongous" we'll iterate over its | |
1937 // "continues humongous" first; in fact we'll do them | |
1938 // first. The order is important. In on case, calling the | |
1939 // closure on the "starts humongous" region might de-allocate | |
1940 // and clear all its "continues humongous" regions and, as a | |
1941 // result, we might end up processing them twice. So, we'll do | |
1942 // them first (notice: most closures will ignore them anyway) and | |
1943 // then we'll do the "starts humongous" region. | |
1944 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
1945 HeapRegion* chr = region_at(ch_index); | |
1946 | |
1947 // if the region has already been claimed or it's not | |
1948 // "continues humongous" we're done | |
1949 if (chr->claim_value() == claim_value || | |
1950 !chr->continuesHumongous()) { | |
1951 break; | |
1952 } | |
1953 | |
1954 // Noone should have claimed it directly. We can given | |
1955 // that we claimed its "starts humongous" region. | |
1956 assert(chr->claim_value() != claim_value, "sanity"); | |
1957 assert(chr->humongous_start_region() == r, "sanity"); | |
1958 | |
1959 if (chr->claimHeapRegion(claim_value)) { | |
1960 // we should always be able to claim it; noone else should | |
1961 // be trying to claim this region | |
1962 | |
1963 bool res2 = cl->doHeapRegion(chr); | |
1964 assert(!res2, "Should not abort"); | |
1965 | |
1966 // Right now, this holds (i.e., no closure that actually | |
1967 // does something with "continues humongous" regions | |
1968 // clears them). We might have to weaken it in the future, | |
1969 // but let's leave these two asserts here for extra safety. | |
1970 assert(chr->continuesHumongous(), "should still be the case"); | |
1971 assert(chr->humongous_start_region() == r, "sanity"); | |
1972 } else { | |
1973 guarantee(false, "we should not reach here"); | |
1974 } | |
1975 } | |
1976 } | |
1977 | |
1978 assert(!r->continuesHumongous(), "sanity"); | |
1979 bool res = cl->doHeapRegion(r); | |
1980 assert(!res, "Should not abort"); | |
1981 } | |
1982 } | |
1983 } | |
1984 | |
390 | 1985 class ResetClaimValuesClosure: public HeapRegionClosure { |
1986 public: | |
1987 bool doHeapRegion(HeapRegion* r) { | |
1988 r->set_claim_value(HeapRegion::InitialClaimValue); | |
1989 return false; | |
1990 } | |
1991 }; | |
1992 | |
1993 void | |
1994 G1CollectedHeap::reset_heap_region_claim_values() { | |
1995 ResetClaimValuesClosure blk; | |
1996 heap_region_iterate(&blk); | |
1997 } | |
1998 | |
#ifdef ASSERT
// Checks whether all regions in the heap carry the expected claim
// value. Also piggy-backs a check that each "continues humongous"
// region's humongous_start_region() points at the most recently seen
// "starts humongous" region.

class CheckClaimValuesClosure : public HeapRegionClosure {
private:
  jint _claim_value;        // the value every region is expected to carry
  size_t _failures;         // number of mismatches found
  HeapRegion* _sh_region;   // last "starts humongous" region seen
public:
  CheckClaimValuesClosure(jint claim_value) :
    _claim_value(claim_value), _failures(0), _sh_region(NULL) { }

  bool doHeapRegion(HeapRegion* r) {
    if (r->claim_value() != _claim_value) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                             "claim value = %d, should be %d",
                             r->bottom(), r->end(), r->claim_value(),
                             _claim_value);
      ++_failures;
    }
    if (!r->isHumongous()) {
      _sh_region = NULL;
    } else if (r->startsHumongous()) {
      _sh_region = r;
    } else if (r->continuesHumongous()) {
      if (r->humongous_start_region() != _sh_region) {
        gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                               "HS = "PTR_FORMAT", should be "PTR_FORMAT,
                               r->bottom(), r->end(),
                               r->humongous_start_region(),
                               _sh_region);
        ++_failures;
      }
    }
    return false;  // never abort: check every region
  }

  size_t failures() { return _failures; }
};

bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  CheckClaimValuesClosure cl(claim_value);
  heap_region_iterate(&cl);
  return cl.failures() == 0;
}
#endif // ASSERT
342 | 2048 |
2049 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2050 HeapRegion* r = g1_policy()->collection_set(); | |
2051 while (r != NULL) { | |
2052 HeapRegion* next = r->next_in_collection_set(); | |
2053 if (cl->doHeapRegion(r)) { | |
2054 cl->incomplete(); | |
2055 return; | |
2056 } | |
2057 r = next; | |
2058 } | |
2059 } | |
2060 | |
2061 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
2062 HeapRegionClosure *cl) { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2063 if (r == NULL) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2064 // The CSet is empty so there's nothing to do. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2065 return; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2066 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2067 |
342 | 2068 assert(r->in_collection_set(), |
2069 "Start region must be a member of the collection set."); | |
2070 HeapRegion* cur = r; | |
2071 while (cur != NULL) { | |
2072 HeapRegion* next = cur->next_in_collection_set(); | |
2073 if (cl->doHeapRegion(cur) && false) { | |
2074 cl->incomplete(); | |
2075 return; | |
2076 } | |
2077 cur = next; | |
2078 } | |
2079 cur = g1_policy()->collection_set(); | |
2080 while (cur != r) { | |
2081 HeapRegion* next = cur->next_in_collection_set(); | |
2082 if (cl->doHeapRegion(cur) && false) { | |
2083 cl->incomplete(); | |
2084 return; | |
2085 } | |
2086 cur = next; | |
2087 } | |
2088 } | |
2089 | |
2090 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
2091 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
2092 } | |
2093 | |
2094 | |
2095 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2096 Space* res = heap_region_containing(addr); | |
2097 if (res == NULL) | |
2098 res = perm_gen()->space_containing(addr); | |
2099 return res; | |
2100 } | |
2101 | |
2102 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2103 Space* sp = space_containing(addr); | |
2104 if (sp != NULL) { | |
2105 return sp->block_start(addr); | |
2106 } | |
2107 return NULL; | |
2108 } | |
2109 | |
2110 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
2111 Space* sp = space_containing(addr); | |
2112 assert(sp != NULL, "block_size of address outside of heap"); | |
2113 return sp->block_size(addr); | |
2114 } | |
2115 | |
2116 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2117 Space* sp = space_containing(addr); | |
2118 return sp->block_is_obj(addr); | |
2119 } | |
2120 | |
2121 bool G1CollectedHeap::supports_tlab_allocation() const { | |
2122 return true; | |
2123 } | |
2124 | |
2125 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
2126 return HeapRegion::GrainBytes; | |
2127 } | |
2128 | |
2129 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
2130 // Return the remaining space in the cur alloc region, but not less than | |
2131 // the min TLAB size. | |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2132 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2133 // Also, this value can be at most the humongous object threshold, |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2134 // since we can't allow tlabs to grow big enough to accomodate |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2135 // humongous objects. |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2136 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2137 // We need to store the cur alloc region locally, since it might change |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2138 // between when we test for NULL and when we use it later. |
342 | 2139 ContiguousSpace* cur_alloc_space = _cur_alloc_region; |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2140 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2141 |
342 | 2142 if (cur_alloc_space == NULL) { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2143 return max_tlab_size; |
342 | 2144 } else { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2145 return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize), |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2146 max_tlab_size); |
342 | 2147 } |
2148 } | |
2149 | |
2150 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { | |
2151 bool dummy; | |
2152 return G1CollectedHeap::mem_allocate(size, false, true, &dummy); | |
2153 } | |
2154 | |
2155 bool G1CollectedHeap::allocs_are_zero_filled() { | |
2156 return false; | |
2157 } | |
2158 | |
2159 size_t G1CollectedHeap::large_typearray_limit() { | |
2160 // FIXME | |
2161 return HeapRegion::GrainBytes/HeapWordSize; | |
2162 } | |
2163 | |
2164 size_t G1CollectedHeap::max_capacity() const { | |
1092
ed52bcc32739
6880903: G1: G1 reports incorrect Runtime.maxMemory()
tonyp
parents:
1089
diff
changeset
|
2165 return g1_reserved_obj_bytes(); |
342 | 2166 } |
2167 | |
2168 jlong G1CollectedHeap::millis_since_last_gc() { | |
2169 // assert(false, "NYI"); | |
2170 return 0; | |
2171 } | |
2172 | |
2173 | |
2174 void G1CollectedHeap::prepare_for_verify() { | |
2175 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2176 ensure_parsability(false); | |
2177 } | |
2178 g1_rem_set()->prepare_for_verify(); | |
2179 } | |
2180 | |
2181 class VerifyLivenessOopClosure: public OopClosure { | |
2182 G1CollectedHeap* g1h; | |
2183 public: | |
2184 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
2185 g1h = _g1h; | |
2186 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2187 void do_oop(narrowOop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2188 void do_oop( oop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2189 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2190 template <class T> void do_oop_work(T *p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2191 oop obj = oopDesc::load_decode_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2192 guarantee(obj == NULL || !g1h->is_obj_dead(obj), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2193 "Dead object referenced by a not dead object"); |
342 | 2194 } |
2195 }; | |
2196 | |
2197 class VerifyObjsInRegionClosure: public ObjectClosure { | |
811 | 2198 private: |
342 | 2199 G1CollectedHeap* _g1h; |
2200 size_t _live_bytes; | |
2201 HeapRegion *_hr; | |
811 | 2202 bool _use_prev_marking; |
342 | 2203 public: |
811 | 2204 // use_prev_marking == true -> use "prev" marking information, |
2205 // use_prev_marking == false -> use "next" marking information | |
2206 VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) | |
2207 : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { | |
342 | 2208 _g1h = G1CollectedHeap::heap(); |
2209 } | |
2210 void do_object(oop o) { | |
2211 VerifyLivenessOopClosure isLive(_g1h); | |
2212 assert(o != NULL, "Huh?"); | |
811 | 2213 if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { |
342 | 2214 o->oop_iterate(&isLive); |
1389
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2215 if (!_hr->obj_allocated_since_prev_marking(o)) { |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2216 size_t obj_size = o->size(); // Make sure we don't overflow |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2217 _live_bytes += (obj_size * HeapWordSize); |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2218 } |
342 | 2219 } |
2220 } | |
2221 size_t live_bytes() { return _live_bytes; } | |
2222 }; | |
2223 | |
2224 class PrintObjsInRegionClosure : public ObjectClosure { | |
2225 HeapRegion *_hr; | |
2226 G1CollectedHeap *_g1; | |
2227 public: | |
2228 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2229 _g1 = G1CollectedHeap::heap(); | |
2230 }; | |
2231 | |
2232 void do_object(oop o) { | |
2233 if (o != NULL) { | |
2234 HeapWord *start = (HeapWord *) o; | |
2235 size_t word_sz = o->size(); | |
2236 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2237 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2238 (void*) o, word_sz, | |
2239 _g1->isMarkedPrev(o), | |
2240 _g1->isMarkedNext(o), | |
2241 _hr->obj_allocated_since_prev_marking(o)); | |
2242 HeapWord *end = start + word_sz; | |
2243 HeapWord *cur; | |
2244 int *val; | |
2245 for (cur = start; cur < end; cur++) { | |
2246 val = (int *) cur; | |
2247 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2248 } | |
2249 } | |
2250 } | |
2251 }; | |
2252 | |
2253 class VerifyRegionClosure: public HeapRegionClosure { | |
811 | 2254 private: |
342 | 2255 bool _allow_dirty; |
390 | 2256 bool _par; |
811 | 2257 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2258 bool _failures; |
811 | 2259 public: |
2260 // use_prev_marking == true -> use "prev" marking information, | |
2261 // use_prev_marking == false -> use "next" marking information | |
2262 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2263 : _allow_dirty(allow_dirty), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2264 _par(par), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2265 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2266 _failures(false) {} |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2267 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2268 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2269 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2270 } |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2271 |
342 | 2272 bool doHeapRegion(HeapRegion* r) { |
390 | 2273 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2274 "Should be unclaimed at verify points."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2275 if (!r->continuesHumongous()) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2276 bool failures = false; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2277 r->verify(_allow_dirty, _use_prev_marking, &failures); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2278 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2279 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2280 } else { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2281 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2282 r->object_iterate(¬_dead_yet_cl); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2283 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2284 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2285 "max_live_bytes "SIZE_FORMAT" " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2286 "< calculated "SIZE_FORMAT, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2287 r->bottom(), r->end(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2288 r->max_live_bytes(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2289 not_dead_yet_cl.live_bytes()); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2290 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2291 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2292 } |
342 | 2293 } |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2294 return false; // stop the region iteration if we hit a failure |
342 | 2295 } |
2296 }; | |
2297 | |
2298 class VerifyRootsClosure: public OopsInGenClosure { | |
2299 private: | |
2300 G1CollectedHeap* _g1h; | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2301 bool _use_prev_marking; |
342 | 2302 bool _failures; |
2303 public: | |
811 | 2304 // use_prev_marking == true -> use "prev" marking information, |
2305 // use_prev_marking == false -> use "next" marking information | |
2306 VerifyRootsClosure(bool use_prev_marking) : | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2307 _g1h(G1CollectedHeap::heap()), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2308 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2309 _failures(false) { } |
342 | 2310 |
2311 bool failures() { return _failures; } | |
2312 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2313 template <class T> void do_oop_nv(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2314 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2315 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2316 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
811 | 2317 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { |
342 | 2318 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2319 "points to dead obj "PTR_FORMAT, p, (void*) obj); |
342 | 2320 obj->print_on(gclog_or_tty); |
2321 _failures = true; | |
2322 } | |
2323 } | |
2324 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2325 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2326 void do_oop(oop* p) { do_oop_nv(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2327 void do_oop(narrowOop* p) { do_oop_nv(p); } |
342 | 2328 }; |
2329 | |
390 | 2330 // This is the task used for parallel heap verification. |
2331 | |
2332 class G1ParVerifyTask: public AbstractGangTask { | |
2333 private: | |
2334 G1CollectedHeap* _g1h; | |
2335 bool _allow_dirty; | |
811 | 2336 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2337 bool _failures; |
390 | 2338 |
2339 public: | |
811 | 2340 // use_prev_marking == true -> use "prev" marking information, |
2341 // use_prev_marking == false -> use "next" marking information | |
2342 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, | |
2343 bool use_prev_marking) : | |
390 | 2344 AbstractGangTask("Parallel verify task"), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2345 _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2346 _allow_dirty(allow_dirty), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2347 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2348 _failures(false) { } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2349 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2350 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2351 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2352 } |
390 | 2353 |
2354 void work(int worker_i) { | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2355 HandleMark hm; |
811 | 2356 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); |
390 | 2357 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
2358 HeapRegion::ParVerifyClaimValue); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2359 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2360 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2361 } |
390 | 2362 } |
2363 }; | |
2364 | |
342 | 2365 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
811 | 2366 verify(allow_dirty, silent, /* use_prev_marking */ true); |
2367 } | |
2368 | |
2369 void G1CollectedHeap::verify(bool allow_dirty, | |
2370 bool silent, | |
2371 bool use_prev_marking) { | |
342 | 2372 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
2373 if (!silent) { gclog_or_tty->print("roots "); } | |
811 | 2374 VerifyRootsClosure rootsCl(use_prev_marking); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2375 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2376 process_strong_roots(true, // activate StrongRootsScope |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2377 false, |
342 | 2378 SharedHeap::SO_AllClasses, |
2379 &rootsCl, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2380 &blobsCl, |
342 | 2381 &rootsCl); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2382 bool failures = rootsCl.failures(); |
342 | 2383 rem_set()->invalidate(perm_gen()->used_region(), false); |
2384 if (!silent) { gclog_or_tty->print("heapRegions "); } | |
390 | 2385 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2386 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2387 "sanity check"); | |
2388 | |
811 | 2389 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
390 | 2390 int n_workers = workers()->total_workers(); |
2391 set_par_threads(n_workers); | |
2392 workers()->run_task(&task); | |
2393 set_par_threads(0); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2394 if (task.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2395 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2396 } |
390 | 2397 |
2398 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2399 "sanity check"); | |
2400 | |
2401 reset_heap_region_claim_values(); | |
2402 | |
2403 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2404 "sanity check"); | |
2405 } else { | |
811 | 2406 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
390 | 2407 _hrs->iterate(&blk); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2408 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2409 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2410 } |
390 | 2411 } |
342 | 2412 if (!silent) gclog_or_tty->print("remset "); |
2413 rem_set()->verify(); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2414 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2415 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2416 gclog_or_tty->print_cr("Heap:"); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2417 print_on(gclog_or_tty, true /* extended */); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2418 gclog_or_tty->print_cr(""); |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2419 #ifndef PRODUCT |
1044 | 2420 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
1388 | 2421 concurrent_mark()->print_reachable("at-verification-failure", |
2422 use_prev_marking, false /* all */); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2423 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2424 #endif |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2425 gclog_or_tty->flush(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2426 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2427 guarantee(!failures, "there should not have been any failures"); |
342 | 2428 } else { |
2429 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2430 } | |
2431 } | |
2432 | |
2433 class PrintRegionClosure: public HeapRegionClosure { | |
2434 outputStream* _st; | |
2435 public: | |
2436 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2437 bool doHeapRegion(HeapRegion* r) { | |
2438 r->print_on(_st); | |
2439 return false; | |
2440 } | |
2441 }; | |
2442 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2443 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 2444 |
2445 void G1CollectedHeap::print_on(outputStream* st) const { | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2446 print_on(st, PrintHeapAtGCExtended); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2447 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2448 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2449 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2450 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2451 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2452 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2453 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2454 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2455 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2456 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2457 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2458 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2459 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2460 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2461 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2462 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2463 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2464 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2465 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2466 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2467 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2468 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2469 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2470 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2471 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2472 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2473 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2474 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 2475 PrintRegionClosure blk(st); |
2476 _hrs->iterate(&blk); | |
2477 } | |
2478 | |
2479 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
2480 if (ParallelGCThreads > 0) { | |
1019 | 2481 workers()->print_worker_threads_on(st); |
2482 } | |
2483 | |
2484 _cmThread->print_on(st); | |
342 | 2485 st->cr(); |
1019 | 2486 |
2487 _cm->print_worker_threads_on(st); | |
2488 | |
2489 _cg1r->print_worker_threads_on(st); | |
2490 | |
342 | 2491 _czft->print_on(st); |
2492 st->cr(); | |
2493 } | |
2494 | |
2495 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
2496 if (ParallelGCThreads > 0) { | |
2497 workers()->threads_do(tc); | |
2498 } | |
2499 tc->do_thread(_cmThread); | |
794 | 2500 _cg1r->threads_do(tc); |
342 | 2501 tc->do_thread(_czft); |
2502 } | |
2503 | |
2504 void G1CollectedHeap::print_tracing_info() const { | |
2505 // We'll overload this to mean "trace GC pause statistics." | |
2506 if (TraceGen0Time || TraceGen1Time) { | |
2507 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2508 // to that. | |
2509 g1_policy()->print_tracing_info(); | |
2510 } | |
751 | 2511 if (G1SummarizeRSetStats) { |
342 | 2512 g1_rem_set()->print_summary_info(); |
2513 } | |
1282 | 2514 if (G1SummarizeConcMark) { |
342 | 2515 concurrent_mark()->print_summary_info(); |
2516 } | |
751 | 2517 if (G1SummarizeZFStats) { |
342 | 2518 ConcurrentZFThread::print_summary_info(); |
2519 } | |
2520 g1_policy()->print_yg_surv_rate_info(); | |
2521 | |
2522 SpecializationStats::print(); | |
2523 } | |
2524 | |
2525 | |
2526 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2527 HeapRegion* hr = heap_region_containing(addr); | |
2528 if (hr == NULL) { | |
2529 return 0; | |
2530 } else { | |
2531 return 1; | |
2532 } | |
2533 } | |
2534 | |
2535 G1CollectedHeap* G1CollectedHeap::heap() { | |
2536 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
2537 "not a garbage-first heap"); | |
2538 return _g1h; | |
2539 } | |
2540 | |
2541 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
2542 // always_do_update_barrier = false; |
342 | 2543 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
2544 // Call allocation profiler | |
2545 AllocationProfiler::iterate_since_last_gc(); | |
2546 // Fill TLAB's and such | |
2547 ensure_parsability(true); | |
2548 } | |
2549 | |
2550 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
2551 // FIXME: what is this about? | |
2552 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
2553 // is set. | |
2554 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
2555 "derived pointer present")); | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
2556 // always_do_update_barrier = true; |
342 | 2557 } |
2558 | |
2559 void G1CollectedHeap::do_collection_pause() { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2560 assert(Heap_lock->owned_by_self(), "we assume we'reholding the Heap_lock"); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2561 |
342 | 2562 // Read the GC count while holding the Heap_lock |
2563 // we need to do this _before_ wait_for_cleanup_complete(), to | |
2564 // ensure that we do not give up the heap lock and potentially | |
2565 // pick up the wrong count | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2566 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); |
342 | 2567 |
2568 // Don't want to do a GC pause while cleanup is being completed! | |
2569 wait_for_cleanup_complete(); | |
2570 | |
2571 g1_policy()->record_stop_world_start(); | |
2572 { | |
2573 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2574 VM_G1IncCollectionPause op(gc_count_before, |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2575 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2576 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2577 GCCause::_g1_inc_collection_pause); |
342 | 2578 VMThread::execute(&op); |
2579 } | |
2580 } | |
2581 | |
2582 void | |
2583 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2584 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2585 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2586 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2587 CGC_lock->notify(); |
342 | 2588 } |
2589 } | |
2590 | |
2591 class VerifyMarkedObjsClosure: public ObjectClosure { | |
2592 G1CollectedHeap* _g1h; | |
2593 public: | |
2594 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
2595 void do_object(oop obj) { | |
2596 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
2597 "markandsweep mark should agree with concurrent deadness"); | |
2598 } | |
2599 }; | |
2600 | |
2601 void | |
2602 G1CollectedHeap::checkConcurrentMark() { | |
2603 VerifyMarkedObjsClosure verifycl(this); | |
2604 // MutexLockerEx x(getMarkBitMapLock(), | |
2605 // Mutex::_no_safepoint_check_flag); | |
678 | 2606 object_iterate(&verifycl, false); |
342 | 2607 } |
2608 | |
2609 void G1CollectedHeap::do_sync_mark() { | |
2610 _cm->checkpointRootsInitial(); | |
2611 _cm->markFromRoots(); | |
2612 _cm->checkpointRootsFinal(false); | |
2613 } | |
2614 | |
2615 // <NEW PREDICTION> | |
2616 | |
2617 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
2618 bool young) { | |
2619 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
2620 } | |
2621 | |
2622 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
2623 predicted_time_ms) { | |
2624 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
2625 } | |
2626 | |
2627 size_t G1CollectedHeap::pending_card_num() { | |
2628 size_t extra_cards = 0; | |
2629 JavaThread *curr = Threads::first(); | |
2630 while (curr != NULL) { | |
2631 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
2632 extra_cards += dcq.size(); | |
2633 curr = curr->next(); | |
2634 } | |
2635 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2636 size_t buffer_size = dcqs.buffer_size(); | |
2637 size_t buffer_num = dcqs.completed_buffers_num(); | |
2638 return buffer_size * buffer_num + extra_cards; | |
2639 } | |
2640 | |
2641 size_t G1CollectedHeap::max_pending_card_num() { | |
2642 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2643 size_t buffer_size = dcqs.buffer_size(); | |
2644 size_t buffer_num = dcqs.completed_buffers_num(); | |
2645 int thread_num = Threads::number_of_threads(); | |
2646 return (buffer_num + thread_num) * buffer_size; | |
2647 } | |
2648 | |
2649 size_t G1CollectedHeap::cards_scanned() { | |
2650 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); | |
2651 return g1_rset->cardsScanned(); | |
2652 } | |
2653 | |
2654 void | |
2655 G1CollectedHeap::setup_surviving_young_words() { | |
2656 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
2657 size_t array_length = g1_policy()->young_cset_length(); | |
2658 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
2659 if (_surviving_young_words == NULL) { | |
2660 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
2661 "Not enough space for young surv words summary."); | |
2662 } | |
2663 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2664 #ifdef ASSERT |
342 | 2665 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2666 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2667 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2668 #endif // !ASSERT |
342 | 2669 } |
2670 | |
2671 void | |
2672 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
2673 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2674 size_t array_length = g1_policy()->young_cset_length(); | |
2675 for (size_t i = 0; i < array_length; ++i) | |
2676 _surviving_young_words[i] += surv_young_words[i]; | |
2677 } | |
2678 | |
2679 void | |
2680 G1CollectedHeap::cleanup_surviving_young_words() { | |
2681 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
2682 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
2683 _surviving_young_words = NULL; | |
2684 } | |
2685 | |
2686 // </NEW PREDICTION> | |
2687 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2688 struct PrepareForRSScanningClosure : public HeapRegionClosure { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2689 bool doHeapRegion(HeapRegion *r) { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2690 r->rem_set()->set_iter_claimed(0); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2691 return false; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2692 } |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2693 }; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2694 |
342 | 2695 void |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2696 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2697 if (GC_locker::check_active_before_gc()) { |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2698 return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2699 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2700 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2701 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2702 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2703 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2704 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2705 { |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2706 ResourceMark rm; |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2707 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2708 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2709 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2710 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2711 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2712 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2713 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2714 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2715 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2716 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2717 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2718 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2719 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2720 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2721 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2722 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2723 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2724 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2725 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2726 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2727 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2728 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2729 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2730 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2731 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2732 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2733 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2734 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2735 TraceMemoryManagerStats tms(false /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2736 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2737 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2738 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2739 guarantee(!is_gc_active(), "collection is not reentrant"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2740 assert(regions_accounted_for(), "Region leakage!"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2741 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2742 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2743 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2744 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2745 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2746 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2747 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2748 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2749 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2750 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2751 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2752 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2753 increment_total_collections(false /* full gc */); |
342 | 2754 |
2755 #if G1_REM_SET_LOGGING | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2756 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2757 print(); |
342 | 2758 #endif |
2759 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2760 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2761 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2762 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2763 gclog_or_tty->print(" VerifyBeforeGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2764 Universe::verify(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2765 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2766 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2767 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2768 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2769 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2770 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2771 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2772 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2773 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2774 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2775 // of the collection set!). |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2776 abandon_cur_alloc_region(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2777 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2778 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2779 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2780 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2781 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2782 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2783 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2784 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2785 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2786 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2787 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2788 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2789 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2790 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2791 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2792 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2793 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2794 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2795 #endif // YOUNG_LIST_VERBOSE |
342 | 2796 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2797 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2798 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2799 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2800 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2801 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2802 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2803 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2804 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2805 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2806 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2807 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2808 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2809 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2810 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2811 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2812 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2813 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2814 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2815 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2816 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2817 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2818 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2819 if (mark_in_progress()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2820 concurrent_mark()->newCSet(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2821 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2822 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2823 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2824 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2825 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2826 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2827 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2828 // Now choose the CS. We may abandon a pause if we find no |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2829 // region that will fit in the MMU pause. |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2830 bool abandoned = g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2831 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2832 // Nothing to do if we were unable to choose a collection set. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2833 if (!abandoned) { |
342 | 2834 #if G1_REM_SET_LOGGING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2835 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2836 print(); |
342 | 2837 #endif |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2838 PrepareForRSScanningClosure prepare_for_rs_scan; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
2839 collection_set_iterate(&prepare_for_rs_scan); |
342 | 2840 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2841 setup_surviving_young_words(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2842 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2843 // Set up the gc allocation regions. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2844 get_gc_alloc_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2845 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2846 // Actually do the work... |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2847 evacuate_collection_set(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2848 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2849 free_collection_set(g1_policy()->collection_set()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2850 g1_policy()->clear_collection_set(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2851 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2852 cleanup_surviving_young_words(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2853 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2854 // Start a new incremental collection set for the next pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2855 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2856 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2857 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2858 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2859 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2860 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2861 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2862 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2863 _young_list->reset_sampled_info(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2864 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2865 // Don't check the whole heap at this point as the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2866 // GC alloc regions from this pause have been tagged |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2867 // as survivors and moved on to the survivor list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2868 // Survivor regions will fail the !is_young() check. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2869 assert(check_young_list_empty(false /* check_heap */), |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2870 "young list should be empty"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2871 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2872 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2873 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2874 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2875 #endif // YOUNG_LIST_VERBOSE |
342 | 2876 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2877 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2878 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2879 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2880 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2881 _young_list->reset_auxilary_lists(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2882 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2883 } else { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2884 // We have abandoned the current collection. This can only happen |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2885 // if we're not doing young or partially young collections, and |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2886 // we didn't find an old region that we're able to collect within |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2887 // the allowed time. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2888 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2889 assert(g1_policy()->collection_set() == NULL, "should be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2890 assert(_young_list->length() == 0, "because it should be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2891 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2892 // This should be a no-op. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2893 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2894 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2895 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2896 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2897 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2898 // Start a new incremental collection set for the next pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2899 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2900 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2901 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2902 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2903 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2904 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2905 |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2906 // This looks confusing, because the DPT should really be empty |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2907 // at this point -- since we have not done any collection work, |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2908 // there should not be any derived pointers in the table to update; |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2909 // however, there is some additional state in the DPT which is |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2910 // reset at the end of the (null) "gc" here via the following call. |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2911 // A better approach might be to split off that state resetting work |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2912 // into a separate method that asserts that the DPT is empty and call |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2913 // that here. That is deferred for now. |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2914 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
342 | 2915 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2916 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2917 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2918 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2919 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2920 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2921 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2922 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2923 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2924 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2925 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
2926 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2927 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2928 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2929 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2930 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2931 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2932 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2933 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2934 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2935 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2936 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2937 } |
342 | 2938 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2939 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2940 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2941 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2942 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2943 #endif // YOUNG_LIST_VERBOSE |
342 | 2944 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2945 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2946 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2947 g1_policy()->record_pause_time_ms(pause_time_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2948 g1_policy()->record_collection_pause_end(abandoned); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2949 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2950 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2951 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2952 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
2953 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2954 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2955 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2956 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2957 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2958 Universe::verify(false); |
342 | 2959 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2960 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2961 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2962 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2963 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2964 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2965 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2966 size_t bytes_before = capacity(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2967 expand(expand_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2968 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2969 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2970 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2971 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2972 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2973 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
2974 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
2975 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2976 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
2977 #endif |
342 | 2978 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2979 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2980 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2981 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2982 assert(verify_region_lists(), "Bad region lists."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2983 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2984 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2985 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2986 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2987 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2988 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2989 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2990 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2991 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2992 Universe::print_heap_after_gc(); |
342 | 2993 } |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
2994 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
2995 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
2996 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
2997 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
2998 } |
342 | 2999 } |
3000 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3001 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3002 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3003 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3004 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3005 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3006 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3007 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3008 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3009 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3010 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3011 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3012 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3013 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3014 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3015 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3016 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3017 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3018 |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3019 |
// Install region r as the GC alloc region for the given allocation
// purpose (or uninstall the current one if r is NULL). Also replaces any
// aliases of the previously installed region, pushes r onto the GC alloc
// region list, and, if concurrent marking is in progress, explicitly
// marks any mutator-allocated objects that would otherwise lose their
// implicit-mark status at the end of the pause.
void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
  assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
  // make sure we don't call set_gc_alloc_region() multiple times on
  // the same region
  assert(r == NULL || !r->is_gc_alloc_region(),
         "shouldn't already be a GC alloc region");
  assert(r == NULL || !r->isHumongous(),
         "humongous regions shouldn't be used as GC alloc regions");

  // Remember the current top so we can later detect (and mark) the
  // "gap" of objects allocated in this region by mutators.
  HeapWord* original_top = NULL;
  if (r != NULL)
    original_top = r->top();

  // We will want to record the used space in r as being there before gc.
  // Once we install it as a GC alloc region it's eligible for allocation.
  // So record it now and use it later.
  size_t r_used = 0;
  if (r != NULL) {
    r_used = r->used();

    if (ParallelGCThreads > 0) {
      // need to take the lock to guard against two threads calling
      // get_gc_alloc_region concurrently (very unlikely but...)
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      r->save_marks();
    }
  }
  HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
  _gc_alloc_regions[purpose] = r;
  if (old_alloc_region != NULL) {
    // Replace aliases too: other purposes may have shared the old
    // region (see the alternative-purpose fixup in get_gc_alloc_regions).
    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
      if (_gc_alloc_regions[ap] == old_alloc_region) {
        _gc_alloc_regions[ap] = r;
      }
    }
  }
  if (r != NULL) {
    push_gc_alloc_region(r);
    if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
      // We are using a region as a GC alloc region after it has been used
      // as a mutator allocation region during the current marking cycle.
      // The mutator-allocated objects are currently implicitly marked, but
      // when we move hr->next_top_at_mark_start() forward at the the end
      // of the GC pause, they won't be. We therefore mark all objects in
      // the "gap". We do this object-by-object, since marking densely
      // does not currently work right with marking bitmap iteration. This
      // means we rely on TLAB filling at the start of pauses, and no
      // "resuscitation" of filled TLAB's. If we want to do this, we need
      // to fix the marking bitmap iteration.
      HeapWord* curhw = r->next_top_at_mark_start();
      HeapWord* t = original_top;

      // Walk the gap [next_top_at_mark_start, original_top) one object
      // at a time, marking each.
      while (curhw < t) {
        oop cur = (oop)curhw;
        // We'll assume parallel for generality. This is rare code.
        concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
        curhw = curhw + cur->size();
      }
      assert(curhw == t, "Should have parsed correctly.");
    }
    if (G1PolicyVerbose > 1) {
      gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
                          "for survivors:", r->bottom(), original_top, r->end());
      r->print();
    }
    // Feed the pre-GC used-bytes figure to the policy for pause accounting.
    g1_policy()->record_before_bytes(r_used);
  }
}
3089 | |
3090 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
3091 assert(Thread::current()->is_VM_thread() || | |
3092 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); | |
3093 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), | |
3094 "Precondition."); | |
3095 hr->set_is_gc_alloc_region(true); | |
3096 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
3097 _gc_alloc_region_list = hr; | |
3098 } | |
3099 | |
#ifdef G1_DEBUG
// Debug-only closure: prints every region that is still tagged as a GC
// alloc region. Used to verify that no such tags survive outside a GC.
class FindGCAllocRegion: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    bool still_tagged = r->is_gc_alloc_region();
    if (still_tagged) {
      gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
                             r->hrs_index(), r->bottom());
    }
    // Never abort the iteration; we want to report every offender.
    return false;
  }
};
#endif // G1_DEBUG
3112 | |
// Tear down the GC alloc region list: untag every region on it, hand
// non-empty survivor regions back to the young list, and account empty
// regions as free. VM-thread only.
void G1CollectedHeap::forget_alloc_region_list() {
  assert(Thread::current()->is_VM_thread(), "Precondition");
  while (_gc_alloc_region_list != NULL) {
    HeapRegion* r = _gc_alloc_region_list;
    assert(r->is_gc_alloc_region(), "Invariant.");
    // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
    // newly allocated data in order to be able to apply deferred updates
    // before the GC is done for verification purposes (i.e to allow
    // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the
    // collection.
    r->ContiguousSpace::set_saved_mark();
    // Unlink r from the list and clear its GC-alloc tagging.
    _gc_alloc_region_list = r->next_gc_alloc_region();
    r->set_next_gc_alloc_region(NULL);
    r->set_is_gc_alloc_region(false);
    if (r->is_survivor()) {
      if (r->is_empty()) {
        // Nothing survived into it; it is no longer young.
        r->set_not_young();
      } else {
        // Keep track of it as a live survivor region for the next pause.
        _young_list->add_survivor_region(r);
      }
    }
    if (r->is_empty()) {
      ++_free_regions;
    }
  }
#ifdef G1_DEBUG
  // Verify that no region anywhere in the heap is still tagged.
  FindGCAllocRegion fa;
  heap_region_iterate(&fa);
#endif // G1_DEBUG
}
3143 | |
3144 | |
3145 bool G1CollectedHeap::check_gc_alloc_regions() { | |
3146 // TODO: allocation regions check | |
3147 return true; | |
3148 } | |
3149 | |
// Set up the GC alloc regions at the start of an evacuation pause: for
// each allocation purpose, reuse the region retained from the previous
// pause when it is still usable, otherwise allocate a fresh region
// (expanding the heap if needed). Finally, alias purposes whose region
// could not be obtained to their alternative purpose's region.
void G1CollectedHeap::get_gc_alloc_regions() {
  // First, let's check that the GC alloc region list is empty (it should)
  assert(_gc_alloc_region_list == NULL, "invariant");

  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    assert(_gc_alloc_regions[ap] == NULL, "invariant");
    assert(_gc_alloc_region_counts[ap] == 0, "invariant");

    // Create new GC alloc regions.
    HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
    _retained_gc_alloc_regions[ap] = NULL;

    if (alloc_region != NULL) {
      assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");

      // let's make sure that the GC alloc region is not tagged as such
      // outside a GC operation
      assert(!alloc_region->is_gc_alloc_region(), "sanity");

      if (alloc_region->in_collection_set() ||
          alloc_region->top() == alloc_region->end() ||
          alloc_region->top() == alloc_region->bottom() ||
          alloc_region->isHumongous()) {
        // we will discard the current GC alloc region if
        // * it's in the collection set (it can happen!),
        // * it's already full (no point in using it),
        // * it's empty (this means that it was emptied during
        // a cleanup and it should be on the free list now), or
        // * it's humongous (this means that it was emptied
        // during a cleanup and was added to the free list, but
        // has been subseqently used to allocate a humongous
        // object that may be less than the region size).

        alloc_region = NULL;
      }
    }

    if (alloc_region == NULL) {
      // we will get a new GC alloc region
      alloc_region = newAllocRegionWithExpansion(ap, 0);
    } else {
      // the region was retained from the last collection
      ++_gc_alloc_region_counts[ap];
      if (G1PrintHeapRegions) {
        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                               "top "PTR_FORMAT,
                               alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
      }
    }

    if (alloc_region != NULL) {
      assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
      set_gc_alloc_region(ap, alloc_region);
    }

    // Either no region was obtained, or the one installed is properly
    // tagged and sits at the head of the GC alloc region list.
    assert(_gc_alloc_regions[ap] == NULL ||
           _gc_alloc_regions[ap]->is_gc_alloc_region(),
           "the GC alloc region should be tagged as such");
    assert(_gc_alloc_regions[ap] == NULL ||
           _gc_alloc_regions[ap] == _gc_alloc_region_list,
           "the GC alloc region should be the same as the GC alloc list head");
  }
  // Set alternative regions for allocation purposes that have reached
  // their limit.
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
    if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
      _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
    }
  }
  assert(check_gc_alloc_regions(), "alloc regions messed up");
}
3222 | |
// Release the GC alloc regions at the end of an evacuation pause. Each
// region is untagged; empty ones go back on the free list, and non-empty
// ones whose purpose allows it are retained for the next pause — unless
// 'totally' is true, in which case nothing is retained.
void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
  // We keep a separate list of all regions that have been alloc regions in
  // the current collection pause. Forget that now. This method will
  // untag the GC alloc regions and tear down the GC alloc region
  // list. It's desirable that no regions are tagged as GC alloc
  // outside GCs.
  forget_alloc_region_list();

  // The current alloc regions contain objs that have survived
  // collection. Make them no longer GC alloc regions.
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    _retained_gc_alloc_regions[ap] = NULL;
    _gc_alloc_region_counts[ap] = 0;

    if (r != NULL) {
      // we retain nothing on _gc_alloc_regions between GCs
      set_gc_alloc_region(ap, NULL);

      if (r->is_empty()) {
        // we didn't actually allocate anything in it; let's just put
        // it on the free list
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        r->set_zero_fill_complete();
        put_free_region_on_list_locked(r);
      } else if (_retain_gc_alloc_region[ap] && !totally) {
        // retain it so that we can use it at the beginning of the next GC
        _retained_gc_alloc_regions[ap] = r;
      }
    }
  }
}
3255 | |
3256 #ifndef PRODUCT | |
3257 // Useful for debugging | |
3258 | |
3259 void G1CollectedHeap::print_gc_alloc_regions() { | |
3260 gclog_or_tty->print_cr("GC alloc regions"); | |
3261 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3262 HeapRegion* r = _gc_alloc_regions[ap]; | |
3263 if (r == NULL) { | |
3264 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3265 } else { | |
3266 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3267 ap, r->bottom(), r->used()); | |
3268 } | |
3269 } | |
3270 } | |
3271 #endif // PRODUCT | |
342 | 3272 |
3273 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3274 _drain_in_progress = false; | |
3275 set_evac_failure_closure(cl); | |
3276 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3277 } | |
3278 | |
3279 void G1CollectedHeap::finalize_for_evac_failure() { | |
3280 assert(_evac_failure_scan_stack != NULL && | |
3281 _evac_failure_scan_stack->length() == 0, | |
3282 "Postcondition"); | |
3283 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3284 delete _evac_failure_scan_stack; |
342 | 3285 _evac_failure_scan_stack = NULL; |
3286 } | |
3287 | |
3288 | |
3289 | |
3290 // *** Sequential G1 Evacuation | |
3291 | |
3292 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { | |
3293 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3294 // let the caller handle alloc failure | |
3295 if (alloc_region == NULL) return NULL; | |
3296 assert(isHumongous(word_size) || !alloc_region->isHumongous(), | |
3297 "Either the object is humongous or the region isn't"); | |
3298 HeapWord* block = alloc_region->allocate(word_size); | |
3299 if (block == NULL) { | |
3300 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); | |
3301 } | |
3302 return block; | |
3303 } | |
3304 | |
3305 class G1IsAliveClosure: public BoolObjectClosure { | |
3306 G1CollectedHeap* _g1; | |
3307 public: | |
3308 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3309 void do_object(oop p) { assert(false, "Do not call."); } | |
3310 bool do_object_b(oop p) { | |
3311 // It is reachable if it is outside the collection set, or is inside | |
3312 // and forwarded. | |
3313 | |
3314 #ifdef G1_DEBUG | |
3315 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3316 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3317 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3318 #endif // G1_DEBUG | |
3319 | |
3320 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3321 } | |
3322 }; | |
3323 | |
3324 class G1KeepAliveClosure: public OopClosure { | |
3325 G1CollectedHeap* _g1; | |
3326 public: | |
3327 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3328 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3329 void do_oop( oop* p) { |
342 | 3330 oop obj = *p; |
3331 #ifdef G1_DEBUG | |
3332 if (PrintGC && Verbose) { | |
3333 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3334 p, (void*) obj, (void*) *p); | |
3335 } | |
3336 #endif // G1_DEBUG | |
3337 | |
3338 if (_g1->obj_in_cs(obj)) { | |
3339 assert( obj->is_forwarded(), "invariant" ); | |
3340 *p = obj->forwardee(); | |
3341 #ifdef G1_DEBUG | |
3342 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3343 (void*) obj, (void*) *p); | |
3344 #endif // G1_DEBUG | |
3345 } | |
3346 } | |
3347 }; | |
3348 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
// Closure that immediately pushes a reference into the remembered set
// machinery via G1RemSet::par_write_ref. Used when deferred RSet updates
// are disabled. _from (inherited from OopsInHeapRegionClosure) is the
// region containing the slots being scanned.
class UpdateRSetImmediate : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  G1RemSet* _g1_rem_set;
public:
  UpdateRSetImmediate(G1CollectedHeap* g1) :
    _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  // Shared implementation for both narrow and full-width oop slots.
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
    T heap_oop = oopDesc::load_heap_oop(p);
    // Skip NULL references; survivor regions are exempt because their
    // remembered sets are handled separately (scanned during the pause).
    if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
      // Worker id 0: this closure runs single-threaded. -- TODO confirm
      _g1_rem_set->par_write_ref(_from, p, 0);
    }
  }
};
3367 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
// Closure that defers remembered-set updates: instead of writing the
// RSet entry immediately, it marks the containing card deferred and
// enqueues it on a dirty card queue for later processing. Used when
// G1DeferredRSUpdate is enabled. _from (inherited) is the region whose
// slots are being scanned.
class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  DirtyCardQueue *_dcq;
  // NOTE(review): members are initialized in declaration order, so _dcq
  // is set after _ct_bs despite the initializer-list order below; this
  // is harmless here since neither depends on the other.
  CardTableModRefBS* _ct_bs;

public:
  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  // Shared implementation for both narrow and full-width oop slots.
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
    // Intra-region references need no RSet entry, and survivor regions
    // are exempt (their RSets are handled separately).
    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
        !_from->is_survivor()) {
      size_t card_index = _ct_bs->index_for(p);
      // Only enqueue the card the first time it is marked deferred.
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3391 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3392 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3393 |
// Object closure run over regions where evacuation failed. Objects that
// failed to move (self-forwarded) are re-marked, have their RSet entries
// recreated via _cl, and get their mark word reset; all other (evacuated
// or dead) objects are overwritten with filler objects so the region
// stays parseable. _hr appears unused in this view. -- TODO confirm
class RemoveSelfPointerClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _prev_marked_bytes;   // bytes re-marked in the "prev" bitmap
  size_t _next_marked_bytes;   // bytes marked in the "next" bitmap
  OopsInHeapRegionClosure *_cl;
public:
  RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
    _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
    _next_marked_bytes(0), _cl(cl) {}

  size_t prev_marked_bytes() { return _prev_marked_bytes; }
  size_t next_marked_bytes() { return _next_marked_bytes; }

  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular if there were two TLABs, one of them partially refined.
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of the TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into middle of the filler object.
  //
  // The current approach is to not coalesce and leave the BOT contents intact.
  void do_object(oop obj) {
    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move (it is forwarded to itself).
      assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
      _cm->markPrev(obj);
      assert(_cm->isPrevMarked(obj), "Should be marked!");
      _prev_marked_bytes += (obj->size() * HeapWordSize);
      if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
        _cm->markAndGrayObjectIfNecessary(obj);
      }
      // Restore a clean mark word (the forwarding pointer lived there).
      obj->set_mark(markOopDesc::prototype());
      // While we were processing RSet buffers during the
      // collection, we actually didn't scan any cards on the
      // collection set, since we didn't want to update remembered
      // sets with entries that point into the collection set, given
      // that live objects from the collection set are about to move
      // and such entries will be stale very soon. This change also
      // dealt with a reliability issue which involved scanning a
      // card in the collection set and coming across an array that
      // was being chunked and looking malformed. The problem is
      // that, if evacuation fails, we might have remembered set
      // entries missing given that we skipped cards on the
      // collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_cl);
      assert(_cm->isPrevMarked(obj), "Should be marked!");
    } else {
      // The object has been either evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr((HeapWord*)obj, obj->size());
      CollectedHeap::fill_with_object(mr);
      _cm->clearRangeBothMaps(mr);
    }
  }
};
3454 | |
3455 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3456 UpdateRSetImmediate immediate_update(_g1h); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3457 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3458 UpdateRSetDeferred deferred_update(_g1h, &dcq); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3459 OopsInHeapRegionClosure *cl; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3460 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3461 cl = &deferred_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3462 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3463 cl = &immediate_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3464 } |
342 | 3465 HeapRegion* cur = g1_policy()->collection_set(); |
3466 while (cur != NULL) { | |
3467 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3468 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3469 RemoveSelfPointerClosure rspc(_g1h, cl); |
342 | 3470 if (cur->evacuation_failed()) { |
3471 assert(cur->in_collection_set(), "bad CS"); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3472 cl->set_region(cur); |
342 | 3473 cur->object_iterate(&rspc); |
3474 | |
3475 // A number of manipulations to make the TAMS be the current top, | |
3476 // and the marked bytes be the ones observed in the iteration. | |
3477 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
3478 // The comments below are the postconditions achieved by the | |
3479 // calls. Note especially the last such condition, which says that | |
3480 // the count of marked bytes has been properly restored. | |
3481 cur->note_start_of_marking(false); | |
3482 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3483 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
3484 // _next_marked_bytes == prev_marked_bytes. | |
3485 cur->note_end_of_marking(); | |
3486 // _prev_top_at_mark_start == top(), | |
3487 // _prev_marked_bytes == prev_marked_bytes | |
3488 } | |
3489 // If there is no mark in progress, we modified the _next variables | |
3490 // above needlessly, but harmlessly. | |
3491 if (_g1h->mark_in_progress()) { | |
3492 cur->note_start_of_marking(false); | |
3493 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3494 // _next_marked_bytes == next_marked_bytes. | |
3495 } | |
3496 | |
3497 // Now make sure the region has the right index in the sorted array. | |
3498 g1_policy()->note_change_in_marked_bytes(cur); | |
3499 } | |
3500 cur = cur->next_in_collection_set(); | |
3501 } | |
3502 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3503 | |
3504 // Now restore saved marks, if any. | |
3505 if (_objs_with_preserved_marks != NULL) { | |
3506 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3507 assert(_objs_with_preserved_marks->length() == | |
3508 _preserved_marks_of_objs->length(), "Both or none."); | |
3509 guarantee(_objs_with_preserved_marks->length() == | |
3510 _preserved_marks_of_objs->length(), "Both or none."); | |
3511 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3512 oop obj = _objs_with_preserved_marks->at(i); | |
3513 markOop m = _preserved_marks_of_objs->at(i); | |
3514 obj->set_mark(m); | |
3515 } | |
3516 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3517 delete _objs_with_preserved_marks; | |
3518 delete _preserved_marks_of_objs; | |
3519 _objs_with_preserved_marks = NULL; | |
3520 _preserved_marks_of_objs = NULL; | |
3521 } | |
3522 } | |
3523 | |
3524 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
3525 _evac_failure_scan_stack->push(obj); | |
3526 } | |
3527 | |
3528 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3529 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3530 | |
3531 while (_evac_failure_scan_stack->length() > 0) { | |
3532 oop obj = _evac_failure_scan_stack->pop(); | |
3533 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3534 obj->oop_iterate_backwards(_evac_failure_closure); | |
3535 } | |
3536 } | |
3537 | |
3538 void G1CollectedHeap::handle_evacuation_failure(oop old) { | |
3539 markOop m = old->mark(); | |
3540 // forward to self | |
3541 assert(!old->is_forwarded(), "precondition"); | |
3542 | |
3543 old->forward_to(old); | |
3544 handle_evacuation_failure_common(old, m); | |
3545 } | |
3546 | |
3547 oop | |
3548 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
3549 oop old) { | |
3550 markOop m = old->mark(); | |
3551 oop forward_ptr = old->forward_to_atomic(old); | |
3552 if (forward_ptr == NULL) { | |
3553 // Forward-to-self succeeded. | |
3554 if (_evac_failure_closure != cl) { | |
3555 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
3556 assert(!_drain_in_progress, | |
3557 "Should only be true while someone holds the lock."); | |
3558 // Set the global evac-failure closure to the current thread's. | |
3559 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
3560 set_evac_failure_closure(cl); | |
3561 // Now do the common part. | |
3562 handle_evacuation_failure_common(old, m); | |
3563 // Reset to NULL. | |
3564 set_evac_failure_closure(NULL); | |
3565 } else { | |
3566 // The lock is already held, and this is recursive. | |
3567 assert(_drain_in_progress, "This should only be the recursive case."); | |
3568 handle_evacuation_failure_common(old, m); | |
3569 } | |
3570 return old; | |
3571 } else { | |
3572 // Someone else had a place to copy it. | |
3573 return forward_ptr; | |
3574 } | |
3575 } | |
3576 | |
3577 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
3578 set_evacuation_failed(true); | |
3579 | |
3580 preserve_mark_if_necessary(old, m); | |
3581 | |
3582 HeapRegion* r = heap_region_containing(old); | |
3583 if (!r->evacuation_failed()) { | |
3584 r->set_evacuation_failed(true); | |
1282 | 3585 if (G1PrintHeapRegions) { |
342 | 3586 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " |
3587 "["PTR_FORMAT","PTR_FORMAT")\n", | |
3588 r, r->bottom(), r->end()); | |
3589 } | |
3590 } | |
3591 | |
3592 push_on_evac_failure_scan_stack(old); | |
3593 | |
3594 if (!_drain_in_progress) { | |
3595 // prevent recursion in copy_to_survivor_space() | |
3596 _drain_in_progress = true; | |
3597 drain_evac_failure_scan_stack(); | |
3598 _drain_in_progress = false; | |
3599 } | |
3600 } | |
3601 | |
3602 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
3603 if (m != markOopDesc::prototype()) { | |
3604 if (_objs_with_preserved_marks == NULL) { | |
3605 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
3606 _objs_with_preserved_marks = | |
3607 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3608 _preserved_marks_of_objs = | |
3609 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
3610 } | |
3611 _objs_with_preserved_marks->push(obj); | |
3612 _preserved_marks_of_objs->push(m); | |
3613 } | |
3614 } | |
3615 | |
3616 // *** Parallel G1 Evacuation | |
3617 | |
3618 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
3619 size_t word_size) { | |
3620 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3621 // let the caller handle alloc failure | |
3622 if (alloc_region == NULL) return NULL; | |
3623 | |
3624 HeapWord* block = alloc_region->par_allocate(word_size); | |
3625 if (block == NULL) { | |
3626 MutexLockerEx x(par_alloc_during_gc_lock(), | |
3627 Mutex::_no_safepoint_check_flag); | |
3628 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
3629 } | |
3630 return block; | |
3631 } | |
3632 | |
545 | 3633 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
3634 bool par) { | |
3635 // Another thread might have obtained alloc_region for the given | |
3636 // purpose, and might be attempting to allocate in it, and might | |
3637 // succeed. Therefore, we can't do the "finalization" stuff on the | |
3638 // region below until we're sure the last allocation has happened. | |
3639 // We ensure this by allocating the remaining space with a garbage | |
3640 // object. | |
3641 if (par) par_allocate_remaining_space(alloc_region); | |
3642 // Now we can do the post-GC stuff on the region. | |
3643 alloc_region->note_end_of_copying(); | |
3644 g1_policy()->record_after_bytes(alloc_region->used()); | |
3645 } | |
3646 | |
342 | 3647 HeapWord* |
3648 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
3649 HeapRegion* alloc_region, | |
3650 bool par, | |
3651 size_t word_size) { | |
3652 HeapWord* block = NULL; | |
3653 // In the parallel case, a previous thread to obtain the lock may have | |
3654 // already assigned a new gc_alloc_region. | |
3655 if (alloc_region != _gc_alloc_regions[purpose]) { | |
3656 assert(par, "But should only happen in parallel case."); | |
3657 alloc_region = _gc_alloc_regions[purpose]; | |
3658 if (alloc_region == NULL) return NULL; | |
3659 block = alloc_region->par_allocate(word_size); | |
3660 if (block != NULL) return block; | |
3661 // Otherwise, continue; this new region is empty, too. | |
3662 } | |
3663 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 3664 retire_alloc_region(alloc_region, par); |
342 | 3665 |
3666 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
3667 // Cannot allocate more regions for the given purpose. | |
3668 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
3669 // Is there an alternative? | |
3670 if (purpose != alt_purpose) { | |
3671 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
3672 // Has not the alternative region been aliased? | |
545 | 3673 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 3674 // Try to allocate in the alternative region. |
3675 if (par) { | |
3676 block = alt_region->par_allocate(word_size); | |
3677 } else { | |
3678 block = alt_region->allocate(word_size); | |
3679 } | |
3680 // Make an alias. | |
3681 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 3682 if (block != NULL) { |
3683 return block; | |
3684 } | |
3685 retire_alloc_region(alt_region, par); | |
342 | 3686 } |
3687 // Both the allocation region and the alternative one are full | |
3688 // and aliased, replace them with a new allocation region. | |
3689 purpose = alt_purpose; | |
3690 } else { | |
3691 set_gc_alloc_region(purpose, NULL); | |
3692 return NULL; | |
3693 } | |
3694 } | |
3695 | |
3696 // Now allocate a new region for allocation. | |
3697 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); | |
3698 | |
3699 // let the caller handle alloc failure | |
3700 if (alloc_region != NULL) { | |
3701 | |
3702 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3703 assert(alloc_region->saved_mark_at_top(), | |
3704 "Mark should have been saved already."); | |
3705 // We used to assert that the region was zero-filled here, but no | |
3706 // longer. | |
3707 | |
3708 // This must be done last: once it's installed, other regions may | |
3709 // allocate in it (without holding the lock.) | |
3710 set_gc_alloc_region(purpose, alloc_region); | |
3711 | |
3712 if (par) { | |
3713 block = alloc_region->par_allocate(word_size); | |
3714 } else { | |
3715 block = alloc_region->allocate(word_size); | |
3716 } | |
3717 // Caller handles alloc failure. | |
3718 } else { | |
3719 // This sets other apis using the same old alloc region to NULL, also. | |
3720 set_gc_alloc_region(purpose, NULL); | |
3721 } | |
3722 return block; // May be NULL. | |
3723 } | |
3724 | |
3725 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
3726 HeapWord* block = NULL; | |
3727 size_t free_words; | |
3728 do { | |
3729 free_words = r->free()/HeapWordSize; | |
3730 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
3731 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 3732 // Otherwise, try to claim it. |
3733 block = r->par_allocate(free_words); | |
3734 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3735 fill_with_object(block, free_words); |
342 | 3736 } |
3737 | |
3738 #ifndef PRODUCT | |
3739 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
3740 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
3741 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
3742 return true; | |
3743 } | |
3744 #endif // PRODUCT | |
3745 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3746 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3747 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3748 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3749 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3750 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3751 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3752 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3753 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3754 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3755 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3756 _age_table(false), |
342 | 3757 #if G1_DETAILED_STATS |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3758 _pushes(0), _pops(0), _steals(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3759 _steal_attempts(0), _overflow_pushes(0), |
342 | 3760 #endif |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3761 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3762 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3763 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3764 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3765 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3766 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3767 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3768 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3769 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3770 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3771 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3772 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3773 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3774 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3775 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3776 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3777 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3778 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3779 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3780 _overflowed_refs = new OverflowQueue(10); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3781 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3782 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3783 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3784 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3785 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3786 } |
342 | 3787 |
3788 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : | |
3789 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
3790 _par_scan_state(par_scan_state) { } | |
3791 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3792 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 3793 // This is called _after_ do_oop_work has been called, hence after |
3794 // the object has been relocated to its new location and *p points | |
3795 // to its new location. | |
3796 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3797 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3798 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3799 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3800 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), |
342 | 3801 "shouldn't still be in the CSet if evacuation didn't fail."); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3802 HeapWord* addr = (HeapWord*)obj; |
342 | 3803 if (_g1->is_in_g1_reserved(addr)) |
3804 _cm->grayRoot(oop(addr)); | |
3805 } | |
3806 } | |
3807 | |
3808 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
3809 size_t word_sz = old->size(); | |
3810 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
3811 // +1 to make the -1 indexes valid... | |
3812 int young_index = from_region->young_index_in_cset()+1; | |
3813 assert( (from_region->is_young() && young_index > 0) || | |
3814 (!from_region->is_young() && young_index == 0), "invariant" ); | |
3815 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
3816 markOop m = old->mark(); | |
545 | 3817 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
3818 : m->age(); | |
3819 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 3820 word_sz); |
3821 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
3822 oop obj = oop(obj_ptr); | |
3823 | |
3824 if (obj_ptr == NULL) { | |
3825 // This will either forward-to-self, or detect that someone else has | |
3826 // installed a forwarding pointer. | |
3827 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
3828 return _g1->handle_evacuation_failure_par(cl, old); | |
3829 } | |
3830 | |
526 | 3831 // We're going to allocate linearly, so might as well prefetch ahead. |
3832 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
3833 | |
342 | 3834 oop forward_ptr = old->forward_to_atomic(obj); |
3835 if (forward_ptr == NULL) { | |
3836 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 3837 if (g1p->track_object_age(alloc_purpose)) { |
3838 // We could simply do obj->incr_age(). However, this causes a | |
3839 // performance issue. obj->incr_age() will first check whether | |
3840 // the object has a displaced mark by checking its mark word; | |
3841 // getting the mark word from the new location of the object | |
3842 // stalls. So, given that we already have the mark word and we | |
3843 // are about to install it anyway, it's better to increase the | |
3844 // age on the mark word, when the object does not have a | |
3845 // displaced mark word. We're not expecting many objects to have | |
3846 // a displaced marked word, so that case is not optimized | |
3847 // further (it could be...) and we simply call obj->incr_age(). | |
3848 | |
3849 if (m->has_displaced_mark_helper()) { | |
3850 // in this case, we have to install the mark word first, | |
3851 // otherwise obj looks to be forwarded (the old mark word, | |
3852 // which contains the forward pointer, was copied) | |
3853 obj->set_mark(m); | |
3854 obj->incr_age(); | |
3855 } else { | |
3856 m = m->incr_age(); | |
545 | 3857 obj->set_mark(m); |
526 | 3858 } |
545 | 3859 _par_scan_state->age_table()->add(obj, word_sz); |
3860 } else { | |
3861 obj->set_mark(m); | |
526 | 3862 } |
3863 | |
342 | 3864 // preserve "next" mark bit |
3865 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
3866 if (!use_local_bitmaps || | |
3867 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
3868 // if we couldn't mark it on the local bitmap (this happens when | |
3869 // the object was not allocated in the GCLab), we have to bite | |
3870 // the bullet and do the standard parallel mark | |
3871 _cm->markAndGrayObjectIfNecessary(obj); | |
3872 } | |
3873 #if 1 | |
3874 if (_g1->isMarkedNext(old)) { | |
3875 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
3876 } | |
3877 #endif | |
3878 } | |
3879 | |
3880 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
3881 surv_young_words[young_index] += word_sz; | |
3882 | |
3883 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
3884 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3885 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3886 _par_scan_state->push_on_queue(old_p); |
342 | 3887 } else { |
526 | 3888 // No point in using the slower heap_region_containing() method, |
3889 // given that we know obj is in the heap. | |
3890 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 3891 obj->oop_iterate_backwards(_scanner); |
3892 } | |
3893 } else { | |
3894 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
3895 obj = forward_ptr; | |
3896 } | |
3897 return obj; | |
3898 } | |
3899 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3900 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3901 template <class T> |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3902 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3903 ::do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3904 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 3905 assert(barrier != G1BarrierRS || obj != NULL, |
3906 "Precondition: G1BarrierRS implies obj is nonNull"); | |
3907 | |
526 | 3908 // here the null check is implicit in the cset_fast_test() test |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3909 if (_g1->in_cset_fast_test(obj)) { |
342 | 3910 #if G1_REM_SET_LOGGING |
526 | 3911 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
3912 "into CS.", p, (void*) obj); | |
342 | 3913 #endif |
526 | 3914 if (obj->is_forwarded()) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3915 oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
526 | 3916 } else { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3917 oop copy_oop = copy_to_survivor_space(obj); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3918 oopDesc::encode_store_heap_oop(p, copy_oop); |
342 | 3919 } |
526 | 3920 // When scanning the RS, we only care about objs in CS. |
3921 if (barrier == G1BarrierRS) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3922 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 3923 } |
526 | 3924 } |
3925 | |
3926 if (barrier == G1BarrierEvac && obj != NULL) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3927 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 3928 } |
3929 | |
3930 if (do_gen_barrier && obj != NULL) { | |
3931 par_do_barrier(p); | |
3932 } | |
3933 } | |
3934 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3935 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3936 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3937 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3938 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
526 | 3939 assert(has_partial_array_mask(p), "invariant"); |
3940 oop old = clear_partial_array_mask(p); | |
342 | 3941 assert(old->is_objArray(), "must be obj array"); |
3942 assert(old->is_forwarded(), "must be forwarded"); | |
3943 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
3944 | |
3945 objArrayOop obj = objArrayOop(old->forwardee()); | |
3946 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
3947 // Process ParGCArrayScanChunk elements now | |
3948 // and push the remainder back onto queue | |
3949 int start = arrayOop(old)->length(); | |
3950 int end = obj->length(); | |
3951 int remainder = end - start; | |
3952 assert(start <= end, "just checking"); | |
3953 if (remainder > 2 * ParGCArrayScanChunk) { | |
3954 // Test above combines last partial chunk with a full chunk | |
3955 end = start + ParGCArrayScanChunk; | |
3956 arrayOop(old)->set_length(end); | |
3957 // Push remainder. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3958 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3959 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3960 _par_scan_state->push_on_queue(old_p); |
342 | 3961 } else { |
3962 // Restore length so that the heap remains parsable in | |
3963 // case of evacuation failure. | |
3964 arrayOop(old)->set_length(end); | |
3965 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3966 _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
342 | 3967 // process our set of indices (include header in first chunk) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3968 obj->oop_iterate_range(&_scanner, start, end); |
342 | 3969 } |
3970 | |
3971 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
3972 protected: | |
3973 G1CollectedHeap* _g1h; | |
3974 G1ParScanThreadState* _par_scan_state; | |
3975 RefToScanQueueSet* _queues; | |
3976 ParallelTaskTerminator* _terminator; | |
3977 | |
3978 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
3979 RefToScanQueueSet* queues() { return _queues; } | |
3980 ParallelTaskTerminator* terminator() { return _terminator; } | |
3981 | |
3982 public: | |
3983 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
3984 G1ParScanThreadState* par_scan_state, | |
3985 RefToScanQueueSet* queues, | |
3986 ParallelTaskTerminator* terminator) | |
3987 : _g1h(g1h), _par_scan_state(par_scan_state), | |
3988 _queues(queues), _terminator(terminator) {} | |
3989 | |
3990 void do_void() { | |
3991 G1ParScanThreadState* pss = par_scan_state(); | |
3992 while (true) { | |
3993 pss->trim_queue(); | |
3994 IF_G1_DETAILED_STATS(pss->note_steal_attempt()); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3995 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3996 StarTask stolen_task; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3997 if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
342 | 3998 IF_G1_DETAILED_STATS(pss->note_steal()); |
526 | 3999 |
4000 // slightly paranoid tests; I'm trying to catch potential | |
4001 // problems before we go into push_on_queue to know where the | |
4002 // problem is coming from | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4003 assert((oop*)stolen_task != NULL, "Error"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4004 if (stolen_task.is_narrow()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4005 assert(UseCompressedOops, "Error"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4006 narrowOop* p = (narrowOop*) stolen_task; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4007 assert(has_partial_array_mask(p) || |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4008 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4009 pss->push_on_queue(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4010 } else { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4011 oop* p = (oop*) stolen_task; |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4012 assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4013 pss->push_on_queue(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4014 } |
342 | 4015 continue; |
4016 } | |
4017 pss->start_term_time(); | |
4018 if (terminator()->offer_termination()) break; | |
4019 pss->end_term_time(); | |
4020 } | |
4021 pss->end_term_time(); | |
4022 pss->retire_alloc_buffers(); | |
4023 } | |
4024 }; | |
4025 | |
4026 class G1ParTask : public AbstractGangTask { | |
4027 protected: | |
4028 G1CollectedHeap* _g1h; | |
4029 RefToScanQueueSet *_queues; | |
4030 ParallelTaskTerminator _terminator; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4031 int _n_workers; |
342 | 4032 |
4033 Mutex _stats_lock; | |
4034 Mutex* stats_lock() { return &_stats_lock; } | |
4035 | |
4036 size_t getNCards() { | |
4037 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
4038 / G1BlockOffsetSharedArray::N_bytes; | |
4039 } | |
4040 | |
4041 public: | |
4042 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
4043 : AbstractGangTask("G1 collection"), | |
4044 _g1h(g1h), | |
4045 _queues(task_queues), | |
4046 _terminator(workers, _queues), | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4047 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4048 _n_workers(workers) |
342 | 4049 {} |
4050 | |
4051 RefToScanQueueSet* queues() { return _queues; } | |
4052 | |
4053 RefToScanQueue *work_queue(int i) { | |
4054 return queues()->queue(i); | |
4055 } | |
4056 | |
4057 void work(int i) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4058 if (i >= _n_workers) return; // no work needed this round |
1611 | 4059 |
4060 double start_time_ms = os::elapsedTime() * 1000.0; | |
4061 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); | |
4062 | |
342 | 4063 ResourceMark rm; |
4064 HandleMark hm; | |
4065 | |
526 | 4066 G1ParScanThreadState pss(_g1h, i); |
4067 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
4068 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
4069 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 4070 |
4071 pss.set_evac_closure(&scan_evac_cl); | |
4072 pss.set_evac_failure_closure(&evac_failure_cl); | |
4073 pss.set_partial_scan_closure(&partial_scan_cl); | |
4074 | |
4075 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
4076 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
4077 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4078 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4079 |
342 | 4080 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
4081 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
4082 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
4083 | |
4084 OopsInHeapRegionClosure *scan_root_cl; | |
4085 OopsInHeapRegionClosure *scan_perm_cl; | |
4086 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
4087 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
342 | 4088 scan_root_cl = &scan_mark_root_cl; |
4089 scan_perm_cl = &scan_mark_perm_cl; | |
4090 } else { | |
4091 scan_root_cl = &only_scan_root_cl; | |
4092 scan_perm_cl = &only_scan_perm_cl; | |
4093 } | |
4094 | |
4095 pss.start_strong_roots(); | |
4096 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4097 SharedHeap::SO_AllClasses, | |
4098 scan_root_cl, | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4099 &push_heap_rs_cl, |
342 | 4100 scan_perm_cl, |
4101 i); | |
4102 pss.end_strong_roots(); | |
4103 { | |
4104 double start = os::elapsedTime(); | |
4105 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4106 evac.do_void(); | |
4107 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4108 double term_ms = pss.term_time()*1000.0; | |
4109 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
1611 | 4110 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); |
342 | 4111 } |
1282 | 4112 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
342 | 4113 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
4114 | |
4115 // Clean up any par-expanded rem sets. | |
4116 HeapRegionRemSet::par_cleanup(); | |
4117 | |
4118 MutexLocker x(stats_lock()); | |
4119 if (ParallelGCVerbose) { | |
4120 gclog_or_tty->print("Thread %d complete:\n", i); | |
4121 #if G1_DETAILED_STATS | |
4122 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", | |
4123 pss.pushes(), | |
4124 pss.pops(), | |
4125 pss.overflow_pushes(), | |
4126 pss.steals(), | |
4127 pss.steal_attempts()); | |
4128 #endif | |
4129 double elapsed = pss.elapsed(); | |
4130 double strong_roots = pss.strong_roots_time(); | |
4131 double term = pss.term_time(); | |
4132 gclog_or_tty->print(" Elapsed: %7.2f ms.\n" | |
4133 " Strong roots: %7.2f ms (%6.2f%%)\n" | |
1611 | 4134 " Termination: %7.2f ms (%6.2f%%) " |
4135 "(in "SIZE_FORMAT" entries)\n", | |
342 | 4136 elapsed * 1000.0, |
4137 strong_roots * 1000.0, (strong_roots*100.0/elapsed), | |
4138 term * 1000.0, (term*100.0/elapsed), | |
4139 pss.term_attempts()); | |
4140 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); | |
4141 gclog_or_tty->print(" Waste: %8dK\n" | |
4142 " Alloc Buffer: %8dK\n" | |
4143 " Undo: %8dK\n", | |
4144 (total_waste * HeapWordSize) / K, | |
4145 (pss.alloc_buffer_waste() * HeapWordSize) / K, | |
4146 (pss.undo_waste() * HeapWordSize) / K); | |
4147 } | |
4148 | |
4149 assert(pss.refs_to_scan() == 0, "Task queue should be empty"); | |
4150 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); | |
1611 | 4151 double end_time_ms = os::elapsedTime() * 1000.0; |
4152 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); | |
342 | 4153 } |
4154 }; | |
4155 | |
4156 // *** Common G1 Evacuation Stuff | |
4157 | |
4158 void | |
4159 G1CollectedHeap:: | |
4160 g1_process_strong_roots(bool collecting_perm_gen, | |
4161 SharedHeap::ScanningOption so, | |
4162 OopClosure* scan_non_heap_roots, | |
4163 OopsInHeapRegionClosure* scan_rs, | |
4164 OopsInGenClosure* scan_perm, | |
4165 int worker_i) { | |
4166 // First scan the strong roots, including the perm gen. | |
4167 double ext_roots_start = os::elapsedTime(); | |
4168 double closure_app_time_sec = 0.0; | |
4169 | |
4170 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4171 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4172 buf_scan_perm.set_generation(perm_gen()); | |
4173 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4174 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4175 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4176 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4177 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4178 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4179 collecting_perm_gen, so, |
342 | 4180 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4181 &eager_scan_code_roots, |
342 | 4182 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4183 |
342 | 4184 // Finish up any enqueued closure apps. |
4185 buf_scan_non_heap_roots.done(); | |
4186 buf_scan_perm.done(); | |
4187 double ext_roots_end = os::elapsedTime(); | |
4188 g1_policy()->reset_obj_copy_time(worker_i); | |
4189 double obj_copy_time_sec = | |
4190 buf_scan_non_heap_roots.closure_app_seconds() + | |
4191 buf_scan_perm.closure_app_seconds(); | |
4192 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4193 double ext_root_time_ms = | |
4194 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4195 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4196 | |
4197 // Scan strong roots in mark stack. | |
4198 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4199 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4200 } | |
4201 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4202 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4203 | |
4204 // XXX What should this be doing in the parallel case? | |
4205 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4206 // Now scan the complement of the collection set. | |
4207 if (scan_rs != NULL) { | |
4208 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4209 } | |
4210 // Finish with the ref_processor roots. | |
4211 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
4212 ref_processor()->oops_do(scan_non_heap_roots); | |
4213 } | |
4214 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4215 _process_strong_tasks->all_tasks_completed(); | |
4216 } | |
4217 | |
4218 void | |
4219 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4220 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4221 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4222 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4223 } |
4224 | |
4225 | |
4226 class SaveMarksClosure: public HeapRegionClosure { | |
4227 public: | |
4228 bool doHeapRegion(HeapRegion* r) { | |
4229 r->save_marks(); | |
4230 return false; | |
4231 } | |
4232 }; | |
4233 | |
4234 void G1CollectedHeap::save_marks() { | |
4235 if (ParallelGCThreads == 0) { | |
4236 SaveMarksClosure sm; | |
4237 heap_region_iterate(&sm); | |
4238 } | |
4239 // We do this even in the parallel case | |
4240 perm_gen()->save_marks(); | |
4241 } | |
4242 | |
4243 void G1CollectedHeap::evacuate_collection_set() { | |
4244 set_evacuation_failed(false); | |
4245 | |
4246 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4247 concurrent_g1_refine()->set_use_cache(false); | |
889 | 4248 concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
4249 | |
342 | 4250 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
4251 set_par_threads(n_workers); | |
4252 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4253 | |
4254 init_for_evac_failure(NULL); | |
4255 | |
4256 rem_set()->prepare_for_younger_refs_iterate(true); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4257 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4258 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
342 | 4259 double start_par = os::elapsedTime(); |
4260 if (ParallelGCThreads > 0) { | |
4261 // The individual threads will set their evac-failure closures. | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4262 StrongRootsScope srs(this); |
342 | 4263 workers()->run_task(&g1_par_task); |
4264 } else { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4265 StrongRootsScope srs(this); |
342 | 4266 g1_par_task.work(0); |
4267 } | |
4268 | |
4269 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4270 g1_policy()->record_par_time(par_time); | |
4271 set_par_threads(0); | |
4272 // Is this the right thing to do here? We don't save marks | |
4273 // on individual heap regions when we allocate from | |
4274 // them in parallel, so this seems like the correct place for this. | |
545 | 4275 retire_all_alloc_regions(); |
342 | 4276 { |
4277 G1IsAliveClosure is_alive(this); | |
4278 G1KeepAliveClosure keep_alive(this); | |
4279 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4280 } | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4281 release_gc_alloc_regions(false /* totally */); |
342 | 4282 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4283 |
889 | 4284 concurrent_g1_refine()->clear_hot_cache(); |
342 | 4285 concurrent_g1_refine()->set_use_cache(true); |
4286 | |
4287 finalize_for_evac_failure(); | |
4288 | |
4289 // Must do this before removing self-forwarding pointers, which clears | |
4290 // the per-region evac-failure flags. | |
4291 concurrent_mark()->complete_marking_in_collection_set(); | |
4292 | |
4293 if (evacuation_failed()) { | |
4294 remove_self_forwarding_pointers(); | |
4295 if (PrintGCDetails) { | |
4296 gclog_or_tty->print(" (evacuation failed)"); | |
4297 } else if (PrintGC) { | |
4298 gclog_or_tty->print("--"); | |
4299 } | |
4300 } | |
4301 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4302 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4303 RedirtyLoggedCardTableEntryFastClosure redirty; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4304 dirty_card_queue_set().set_closure(&redirty); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4305 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
1111 | 4306 |
4307 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); | |
4308 dcq.merge_bufferlists(&dirty_card_queue_set()); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4309 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4310 } |
342 | 4311 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
4312 } | |
4313 | |
4314 void G1CollectedHeap::free_region(HeapRegion* hr) { | |
4315 size_t pre_used = 0; | |
4316 size_t cleared_h_regions = 0; | |
4317 size_t freed_regions = 0; | |
4318 UncleanRegionList local_list; | |
4319 | |
4320 HeapWord* start = hr->bottom(); | |
4321 HeapWord* end = hr->prev_top_at_mark_start(); | |
4322 size_t used_bytes = hr->used(); | |
4323 size_t live_bytes = hr->max_live_bytes(); | |
4324 if (used_bytes > 0) { | |
4325 guarantee( live_bytes <= used_bytes, "invariant" ); | |
4326 } else { | |
4327 guarantee( live_bytes == 0, "invariant" ); | |
4328 } | |
4329 | |
4330 size_t garbage_bytes = used_bytes - live_bytes; | |
4331 if (garbage_bytes > 0) | |
4332 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); | |
4333 | |
4334 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, | |
4335 &local_list); | |
4336 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
4337 &local_list); | |
4338 } | |
4339 | |
4340 void | |
4341 G1CollectedHeap::free_region_work(HeapRegion* hr, | |
4342 size_t& pre_used, | |
4343 size_t& cleared_h_regions, | |
4344 size_t& freed_regions, | |
4345 UncleanRegionList* list, | |
4346 bool par) { | |
4347 pre_used += hr->used(); | |
4348 if (hr->isHumongous()) { | |
4349 assert(hr->startsHumongous(), | |
4350 "Only the start of a humongous region should be freed."); | |
4351 int ind = _hrs->find(hr); | |
4352 assert(ind != -1, "Should have an index."); | |
4353 // Clear the start region. | |
4354 hr->hr_clear(par, true /*clear_space*/); | |
4355 list->insert_before_head(hr); | |
4356 cleared_h_regions++; | |
4357 freed_regions++; | |
4358 // Clear any continued regions. | |
4359 ind++; | |
4360 while ((size_t)ind < n_regions()) { | |
4361 HeapRegion* hrc = _hrs->at(ind); | |
4362 if (!hrc->continuesHumongous()) break; | |
4363 // Otherwise, does continue the H region. | |
4364 assert(hrc->humongous_start_region() == hr, "Huh?"); | |
4365 hrc->hr_clear(par, true /*clear_space*/); | |
4366 cleared_h_regions++; | |
4367 freed_regions++; | |
4368 list->insert_before_head(hrc); | |
4369 ind++; | |
4370 } | |
4371 } else { | |
4372 hr->hr_clear(par, true /*clear_space*/); | |
4373 list->insert_before_head(hr); | |
4374 freed_regions++; | |
4375 // If we're using clear2, this should not be enabled. | |
4376 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); | |
4377 } | |
4378 } | |
4379 | |
4380 void G1CollectedHeap::finish_free_region_work(size_t pre_used, | |
4381 size_t cleared_h_regions, | |
4382 size_t freed_regions, | |
4383 UncleanRegionList* list) { | |
4384 if (list != NULL && list->sz() > 0) { | |
4385 prepend_region_list_on_unclean_list(list); | |
4386 } | |
4387 // Acquire a lock, if we're parallel, to update possibly-shared | |
4388 // variables. | |
4389 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; | |
4390 { | |
4391 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
4392 _summary_bytes_used -= pre_used; | |
4393 _num_humongous_regions -= (int) cleared_h_regions; | |
4394 _free_regions += freed_regions; | |
4395 } | |
4396 } | |
4397 | |
4398 | |
4399 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4400 while (list != NULL) { | |
4401 guarantee( list->is_young(), "invariant" ); | |
4402 | |
4403 HeapWord* bottom = list->bottom(); | |
4404 HeapWord* end = list->end(); | |
4405 MemRegion mr(bottom, end); | |
4406 ct_bs->dirty(mr); | |
4407 | |
4408 list = list->get_next_young_region(); | |
4409 } | |
4410 } | |
4411 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4412 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4413 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4414 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4415 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4416 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4417 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4418 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4419 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4420 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4421 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4422 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4423 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4424 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4425 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4426 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4427 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4428 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4429 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4430 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4431 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4432 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4433 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4434 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4435 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4436 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4437 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4438 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4439 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4440 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4441 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4442 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4443 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4444 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4445 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4446 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4447 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4448 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4449 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4450 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4451 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4452 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4453 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4454 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4455 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4456 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4457 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4458 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4459 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4460 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4461 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4462 class G1VerifyCardTableCleanup: public HeapRegionClosure { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4463 CardTableModRefBS* _ct_bs; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4464 public: |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4465 G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4466 : _ct_bs(ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4467 { } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4468 virtual bool doHeapRegion(HeapRegion* r) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4469 { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4470 MemRegion mr(r->bottom(), r->end()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4471 if (r->is_survivor()) { |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4472 _ct_bs->verify_dirty_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4473 } else { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4474 _ct_bs->verify_clean_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4475 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4476 return false; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4477 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4478 }; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4479 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4480 |
// Clears the card-table entries for the regions on the dirty-cards region
// list and records the elapsed time with the policy. With ParallelGCThreads
// the clearing is handed to G1ParCleanupCTTask run by the work gang;
// otherwise the list is drained serially here and survivor-region cards are
// then re-dirtied (in the parallel path the task is constructed with the
// survivor list head, so presumably it handles that itself — confirm in
// G1ParCleanupCTTask).
void G1CollectedHeap::cleanUpCardTable() {
  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  double start = os::elapsedTime();

  // Iterate over the dirty cards region list.
  G1ParCleanupCTTask cleanup_task(ct_bs, this,
                                  _young_list->first_survivor_region());

  if (ParallelGCThreads > 0) {
    set_par_threads(workers()->total_workers());
    workers()->run_task(&cleanup_task);
    set_par_threads(0);
  } else {
    while (_dirty_cards_region_list) {
      HeapRegion* r = _dirty_cards_region_list;
      cleanup_task.clear_cards(r);
      _dirty_cards_region_list = r->get_next_dirty_cards_region();
      if (_dirty_cards_region_list == r) {
        // The last region.
        _dirty_cards_region_list = NULL;
      }
      r->set_next_dirty_cards_region(NULL);
    }
    // now, redirty the cards of the survivor regions
    // (it seemed faster to do it this way, instead of iterating over
    // all regions and then clearing / dirtying as appropriate)
    dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
  }

  double elapsed = os::elapsedTime() - start;
  g1_policy()->record_clear_ct_time( elapsed * 1000.0);
#ifndef PRODUCT
  // Optionally verify that every non-survivor, non-dirty region is clean.
  if (G1VerifyCTCleanup || VerifyAfterGC) {
    G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
    heap_region_iterate(&cleanup_verifier);
  }
#endif
}
4519 | |
4520 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { | |
4521 if (g1_policy()->should_do_collection_pause(word_size)) { | |
4522 do_collection_pause(); | |
4523 } | |
4524 } | |
4525 | |
4526 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
4527 double young_time_ms = 0.0; | |
4528 double non_young_time_ms = 0.0; | |
4529 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4530 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4531 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4532 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4533 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4534 |
342 | 4535 G1CollectorPolicy* policy = g1_policy(); |
4536 | |
4537 double start_sec = os::elapsedTime(); | |
4538 bool non_young = true; | |
4539 | |
4540 HeapRegion* cur = cs_head; | |
4541 int age_bound = -1; | |
4542 size_t rs_lengths = 0; | |
4543 | |
4544 while (cur != NULL) { | |
4545 if (non_young) { | |
4546 if (cur->is_young()) { | |
4547 double end_sec = os::elapsedTime(); | |
4548 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4549 non_young_time_ms += elapsed_ms; | |
4550 | |
4551 start_sec = os::elapsedTime(); | |
4552 non_young = false; | |
4553 } | |
4554 } else { | |
4555 if (!cur->is_on_free_list()) { | |
4556 double end_sec = os::elapsedTime(); | |
4557 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4558 young_time_ms += elapsed_ms; | |
4559 | |
4560 start_sec = os::elapsedTime(); | |
4561 non_young = true; | |
4562 } | |
4563 } | |
4564 | |
4565 rs_lengths += cur->rem_set()->occupied(); | |
4566 | |
4567 HeapRegion* next = cur->next_in_collection_set(); | |
4568 assert(cur->in_collection_set(), "bad CS"); | |
4569 cur->set_next_in_collection_set(NULL); | |
4570 cur->set_in_collection_set(false); | |
4571 | |
4572 if (cur->is_young()) { | |
4573 int index = cur->young_index_in_cset(); | |
4574 guarantee( index != -1, "invariant" ); | |
4575 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
4576 size_t words_survived = _surviving_young_words[index]; | |
4577 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4578 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4579 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4580 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4581 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4582 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4583 cur->set_next_young_region(NULL); |
342 | 4584 } else { |
4585 int index = cur->young_index_in_cset(); | |
4586 guarantee( index == -1, "invariant" ); | |
4587 } | |
4588 | |
4589 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
4590 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
4591 "invariant" ); | |
4592 | |
4593 if (!cur->evacuation_failed()) { | |
4594 // And the region is empty. | |
4595 assert(!cur->is_empty(), | |
4596 "Should not have empty regions in a CS."); | |
4597 free_region(cur); | |
4598 } else { | |
4599 cur->uninstall_surv_rate_group(); | |
4600 if (cur->is_young()) | |
4601 cur->set_young_index_in_cset(-1); | |
4602 cur->set_not_young(); | |
4603 cur->set_evacuation_failed(false); | |
4604 } | |
4605 cur = next; | |
4606 } | |
4607 | |
4608 policy->record_max_rs_lengths(rs_lengths); | |
4609 policy->cset_regions_freed(); | |
4610 | |
4611 double end_sec = os::elapsedTime(); | |
4612 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4613 if (non_young) | |
4614 non_young_time_ms += elapsed_ms; | |
4615 else | |
4616 young_time_ms += elapsed_ms; | |
4617 | |
4618 policy->record_young_free_cset_time_ms(young_time_ms); | |
4619 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
4620 } | |
4621 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4622 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4623 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4624 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4625 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4626 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4627 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4628 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4629 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4630 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4631 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4632 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4633 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4634 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4635 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4636 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4637 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4638 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4639 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4640 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4641 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4642 |
342 | 4643 HeapRegion* |
4644 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { | |
4645 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4646 HeapRegion* res = pop_unclean_region_list_locked(); | |
4647 if (res != NULL) { | |
4648 assert(!res->continuesHumongous() && | |
4649 res->zero_fill_state() != HeapRegion::Allocated, | |
4650 "Only free regions on unclean list."); | |
4651 if (zero_filled) { | |
4652 res->ensure_zero_filled_locked(); | |
4653 res->set_zero_fill_allocated(); | |
4654 } | |
4655 } | |
4656 return res; | |
4657 } | |
4658 | |
4659 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { | |
4660 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4661 return alloc_region_from_unclean_list_locked(zero_filled); | |
4662 } | |
4663 | |
4664 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { | |
4665 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4666 put_region_on_unclean_list_locked(r); | |
4667 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4668 } | |
4669 | |
4670 void G1CollectedHeap::set_unclean_regions_coming(bool b) { | |
4671 MutexLockerEx x(Cleanup_mon); | |
4672 set_unclean_regions_coming_locked(b); | |
4673 } | |
4674 | |
// Records whether more unclean regions are still to arrive. Caller must
// hold Cleanup_mon; clearing the flag notifies waiters (see
// wait_for_cleanup_complete_locked()).
void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
  assert(Cleanup_mon->owned_by_self(), "Precondition");
  _unclean_regions_coming = b;
  // Wake up mutator threads that might be waiting for completeCleanup to
  // finish.
  if (!b) Cleanup_mon->notify_all();
}
4682 | |
4683 void G1CollectedHeap::wait_for_cleanup_complete() { | |
4684 MutexLockerEx x(Cleanup_mon); | |
4685 wait_for_cleanup_complete_locked(); | |
4686 } | |
4687 | |
// Blocks on Cleanup_mon until _unclean_regions_coming is cleared by
// set_unclean_regions_coming_locked(false). Caller must hold Cleanup_mon.
void G1CollectedHeap::wait_for_cleanup_complete_locked() {
  assert(Cleanup_mon->owned_by_self(), "precondition");
  // Standard monitor idiom: re-check the condition after every wakeup.
  while (_unclean_regions_coming) {
    Cleanup_mon->wait();
  }
}
4694 | |
// Inserts r at the head of the unclean region list. Caller must hold
// ZF_mon. A GC alloc region must never land here; in debug builds that
// case is caught with a diagnostic dump.
void
G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
  assert(ZF_mon->owned_by_self(), "precondition.");
#ifdef ASSERT
  // The assert inside this branch always fails (the condition was just
  // tested true); print_on() runs first so the heap description appears
  // in the failure message.
  if (r->is_gc_alloc_region()) {
    ResourceMark rm;
    stringStream region_str;
    print_on(&region_str);
    assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s",
                                             region_str.as_string()));
  }
#endif
  _unclean_region_list.insert_before_head(r);
}
4709 | |
4710 void | |
4711 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { | |
4712 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4713 prepend_region_list_on_unclean_list_locked(list); | |
4714 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4715 } | |
4716 | |
4717 void | |
4718 G1CollectedHeap:: | |
4719 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { | |
4720 assert(ZF_mon->owned_by_self(), "precondition."); | |
4721 _unclean_region_list.prepend_list(list); | |
4722 } | |
4723 | |
4724 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { | |
4725 assert(ZF_mon->owned_by_self(), "precondition."); | |
4726 HeapRegion* res = _unclean_region_list.pop(); | |
4727 if (res != NULL) { | |
4728 // Inform ZF thread that there's a new unclean head. | |
4729 if (_unclean_region_list.hd() != NULL && should_zf()) | |
4730 ZF_mon->notify_all(); | |
4731 } | |
4732 return res; | |
4733 } | |
4734 | |
4735 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { | |
4736 assert(ZF_mon->owned_by_self(), "precondition."); | |
4737 return _unclean_region_list.hd(); | |
4738 } | |
4739 | |
4740 | |
4741 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { | |
4742 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4743 HeapRegion* r = peek_unclean_region_list_locked(); | |
4744 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { | |
4745 // Result of below must be equal to "r", since we hold the lock. | |
4746 (void)pop_unclean_region_list_locked(); | |
4747 put_free_region_on_list_locked(r); | |
4748 return true; | |
4749 } else { | |
4750 return false; | |
4751 } | |
4752 } | |
4753 | |
4754 bool G1CollectedHeap::move_cleaned_region_to_free_list() { | |
4755 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4756 return move_cleaned_region_to_free_list_locked(); | |
4757 } | |
4758 | |
4759 | |
// Pushes r onto the head of the free region list and bumps the cached
// size. Caller must hold ZF_mon; r must be a zero-filled, empty,
// non-humongous region that is on neither list yet.
void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
  assert(ZF_mon->owned_by_self(), "precondition.");
  assert(_free_region_list_size == free_region_list_length(), "Inv");
  assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
         "Regions on free list must be zero filled");
  assert(!r->isHumongous(), "Must not be humongous.");
  assert(r->is_empty(), "Better be empty");
  assert(!r->is_on_free_list(),
         "Better not already be on free list");
  assert(!r->is_on_unclean_list(),
         "Better not already be on unclean list");
  r->set_on_free_list(true);
  r->set_next_on_free_list(_free_region_list);
  _free_region_list = r;
  _free_region_list_size++;
  assert(_free_region_list_size == free_region_list_length(), "Inv");
}
4777 | |
4778 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { | |
4779 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4780 put_free_region_on_list_locked(r); | |
4781 } | |
4782 | |
4783 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { | |
4784 assert(ZF_mon->owned_by_self(), "precondition."); | |
4785 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4786 HeapRegion* res = _free_region_list; | |
4787 if (res != NULL) { | |
4788 _free_region_list = res->next_from_free_list(); | |
4789 _free_region_list_size--; | |
4790 res->set_on_free_list(false); | |
4791 res->set_next_on_free_list(NULL); | |
4792 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4793 } | |
4794 return res; | |
4795 } | |
4796 | |
4797 | |
// Allocates a region, trying the free list and the unclean list in up to
// two passes. When zero_filled, the (already-zeroed) free list is tried
// first; otherwise the unclean list is tried first. Returns NULL when
// both lists have been tried and are empty.
HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
  // By self, or on behalf of self.
  assert(Heap_lock->is_locked(), "Precondition");
  HeapRegion* res = NULL;
  bool first = true;
  while (res == NULL) {
    // Skip the free list on the first pass when a non-zero-filled region
    // is acceptable; always consult it on the second pass.
    if (zero_filled || !first) {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
      res = pop_free_region_list_locked();
      if (res != NULL) {
        assert(!res->zero_fill_is_allocated(),
               "No allocated regions on free list.");
        res->set_zero_fill_allocated();
      } else if (!first) {
        break;  // We tried both, time to return NULL.
      }
    }

    if (res == NULL) {
      res = alloc_region_from_unclean_list(zero_filled);
    }
    assert(res == NULL ||
           !zero_filled ||
           res->zero_fill_is_allocated(),
           "We must have allocated the region we're returning");
    first = false;
  }
  return res;
}
4827 | |
// Unlinks every region whose zero-fill state says it has been allocated
// from both the unclean list and the free list, fixing up the singly
// linked lists in place.
void G1CollectedHeap::remove_allocated_regions_from_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  {
    // First: the unclean list.
    HeapRegion* prev = NULL;
    HeapRegion* cur = _unclean_region_list.hd();
    while (cur != NULL) {
      // Capture the successor before unlinking cur.
      HeapRegion* next = cur->next_from_unclean_list();
      if (cur->zero_fill_is_allocated()) {
        // Remove from the list.
        if (prev == NULL) {
          (void)_unclean_region_list.pop();
        } else {
          _unclean_region_list.delete_after(prev);
        }
        cur->set_on_unclean_list(false);
        cur->set_next_on_unclean_list(NULL);
      } else {
        prev = cur;
      }
      cur = next;
    }
    assert(_unclean_region_list.sz() == unclean_region_list_length(),
           "Inv");
  }

  {
    // Second: the free list (same unlink pattern, raw head pointer).
    HeapRegion* prev = NULL;
    HeapRegion* cur = _free_region_list;
    while (cur != NULL) {
      HeapRegion* next = cur->next_from_free_list();
      if (cur->zero_fill_is_allocated()) {
        // Remove from the list.
        if (prev == NULL) {
          _free_region_list = cur->next_from_free_list();
        } else {
          prev->set_next_on_free_list(cur->next_from_free_list());
        }
        cur->set_on_free_list(false);
        cur->set_next_on_free_list(NULL);
        _free_region_list_size--;
      } else {
        prev = cur;
      }
      cur = next;
    }
    assert(_free_region_list_size == free_region_list_length(), "Inv");
  }
}
4876 | |
4877 bool G1CollectedHeap::verify_region_lists() { | |
4878 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4879 return verify_region_lists_locked(); | |
4880 } | |
4881 | |
// Sanity-checks both region lists: membership flags, zero-fill states,
// and the cached lengths. Returns true (verification failures fire
// guarantee()s instead of returning false).
bool G1CollectedHeap::verify_region_lists_locked() {
  HeapRegion* unclean = _unclean_region_list.hd();
  while (unclean != NULL) {
    guarantee(unclean->is_on_unclean_list(), "Well, it is!");
    guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
    guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
              "Everything else is possible.");
    unclean = unclean->next_from_unclean_list();
  }
  guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");

  HeapRegion* free_r = _free_region_list;
  while (free_r != NULL) {
    assert(free_r->is_on_free_list(), "Well, it is!");
    assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
    // Free-list regions must not be in a pre-zero-fill state.
    switch (free_r->zero_fill_state()) {
    case HeapRegion::NotZeroFilled:
    case HeapRegion::ZeroFilling:
      guarantee(false, "Should not be on free list.");
      break;
    default:
      // Everything else is possible.
      break;
    }
    free_r = free_r->next_from_free_list();
  }
  guarantee(_free_region_list_size == free_region_list_length(), "Inv");
  // If we didn't do an assertion...
  return true;
}
4912 | |
4913 size_t G1CollectedHeap::free_region_list_length() { | |
4914 assert(ZF_mon->owned_by_self(), "precondition."); | |
4915 size_t len = 0; | |
4916 HeapRegion* cur = _free_region_list; | |
4917 while (cur != NULL) { | |
4918 len++; | |
4919 cur = cur->next_from_free_list(); | |
4920 } | |
4921 return len; | |
4922 } | |
4923 | |
// Length of the unclean region list. Caller must hold ZF_mon.
size_t G1CollectedHeap::unclean_region_list_length() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  return _unclean_region_list.length();
}
4928 | |
// Number of regions in the heap region sequence.
size_t G1CollectedHeap::n_regions() {
  return _hrs->length();
}
4932 | |
4933 size_t G1CollectedHeap::max_regions() { | |
4934 return | |
4935 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / | |
4936 HeapRegion::GrainBytes; | |
4937 } | |
4938 | |
// Returns the cached number of free regions.
size_t G1CollectedHeap::free_regions() {
  /* Possibly-expensive assert.
  assert(_free_regions == count_free_regions(),
  "_free_regions is off.");
  */
  return _free_regions;
}
4946 | |
4947 bool G1CollectedHeap::should_zf() { | |
4948 return _free_region_list_size < (size_t) G1ConcZFMaxRegions; | |
4949 } | |
4950 | |
4951 class RegionCounter: public HeapRegionClosure { | |
4952 size_t _n; | |
4953 public: | |
4954 RegionCounter() : _n(0) {} | |
4955 bool doHeapRegion(HeapRegion* r) { | |
677 | 4956 if (r->is_empty()) { |
342 | 4957 assert(!r->isHumongous(), "H regions should not be empty."); |
4958 _n++; | |
4959 } | |
4960 return false; | |
4961 } | |
4962 int res() { return (int) _n; } | |
4963 }; | |
4964 | |
4965 size_t G1CollectedHeap::count_free_regions() { | |
4966 RegionCounter rc; | |
4967 heap_region_iterate(&rc); | |
4968 size_t n = rc.res(); | |
4969 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) | |
4970 n--; | |
4971 return n; | |
4972 } | |
4973 | |
4974 size_t G1CollectedHeap::count_free_regions_list() { | |
4975 size_t n = 0; | |
4976 size_t o = 0; | |
4977 ZF_mon->lock_without_safepoint_check(); | |
4978 HeapRegion* cur = _free_region_list; | |
4979 while (cur != NULL) { | |
4980 cur = cur->next_from_free_list(); | |
4981 n++; | |
4982 } | |
4983 size_t m = unclean_region_list_length(); | |
4984 ZF_mon->unlock(); | |
4985 return n + m; | |
4986 } | |
4987 | |
4988 bool G1CollectedHeap::should_set_young_locked() { | |
4989 assert(heap_lock_held_for_gc(), | |
4990 "the heap lock should already be held by or for this thread"); | |
4991 return (g1_policy()->in_young_gc_mode() && | |
4992 g1_policy()->should_add_next_region_to_young_list()); | |
4993 } | |
4994 | |
// Adds hr to the young list and informs the policy that it is a
// short-lived (eden) region. The heap lock must be held.
void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  _young_list->push_region(hr);
  g1_policy()->set_region_short_lived(hr);
}
5001 | |
5002 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5003 private: | |
5004 bool _success; | |
5005 public: | |
5006 NoYoungRegionsClosure() : _success(true) { } | |
5007 bool doHeapRegion(HeapRegion* r) { | |
5008 if (r->is_young()) { | |
5009 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5010 r->bottom(), r->end()); | |
5011 _success = false; | |
5012 } | |
5013 return false; | |
5014 } | |
5015 bool success() { return _success; } | |
5016 }; | |
5017 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
// Returns true if the young list is empty. Delegates the list check
// (optionally including the sampled region, per check_sample) to the
// young list itself; when check_heap is set, additionally scans every
// heap region for stray young tags.
bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
  bool ret = _young_list->check_list_empty(check_sample);

  if (check_heap) {
    NoYoungRegionsClosure closure;
    heap_region_iterate(&closure);
    ret = ret && closure.success();
  }

  return ret;
}
5029 | |
// Empties the young list. The heap lock must be held and we must be in
// young GC mode.
void G1CollectedHeap::empty_young_list() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");

  _young_list->empty_list();
}
5037 | |
5038 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5039 bool no_allocs = true; | |
5040 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5041 HeapRegion* r = _gc_alloc_regions[ap]; | |
5042 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5043 } | |
5044 return no_allocs; | |
5045 } | |
5046 | |
545 | 5047 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5048 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5049 HeapRegion* r = _gc_alloc_regions[ap]; | |
5050 if (r != NULL) { | |
5051 // Check for aliases. | |
5052 bool has_processed_alias = false; | |
5053 for (int i = 0; i < ap; ++i) { | |
5054 if (_gc_alloc_regions[i] == r) { | |
5055 has_processed_alias = true; | |
5056 break; | |
5057 } | |
5058 } | |
5059 if (!has_processed_alias) { | |
545 | 5060 retire_alloc_region(r, false /* par */); |
342 | 5061 } |
5062 } | |
5063 } | |
5064 } | |
5065 | |
5066 | |
// Done at the start of full GC.
// Drains both the unclean and free region lists and checks that the
// cached free-list size went to zero (dumping the heap if not).
void G1CollectedHeap::tear_down_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // Drain the unclean list.
  while (pop_unclean_region_list_locked() != NULL) ;
  assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
         "Postconditions of loop.");
  // Drain the free list.
  while (pop_free_region_list_locked() != NULL) ;
  assert(_free_region_list == NULL, "Postcondition of loop.");
  if (_free_region_list_size != 0) {
    gclog_or_tty->print_cr("Size is %d.", _free_region_list_size);
    print_on(gclog_or_tty, true /* extended */);
  }
  assert(_free_region_list_size == 0, "Postconditions of loop.");
}
5081 | |
5082 | |
// Closure used after a full GC: zero-fills the unallocated tail of each
// used region, and sorts every empty region back onto the unclean or free
// list according to its zero-fill state. Counts the empty regions seen.
class RegionResetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;   // number of empty regions encountered
public:
  RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      // Used region: zero the tail above top, mark allocated-from.
      if (r->top() < r->end()) {
        Copy::fill_to_words(r->top(),
                          pointer_delta(r->end(), r->top()));
      }
      r->set_zero_fill_allocated();
    } else {
      assert(r->is_empty(), "tautology");
      _n++;
      switch (r->zero_fill_state()) {
        case HeapRegion::NotZeroFilled:
        case HeapRegion::ZeroFilling:
          _g1->put_region_on_unclean_list_locked(r);
          break;
        case HeapRegion::Allocated:
          r->set_zero_fill_complete();
          // no break; go on to put on free list.
        case HeapRegion::ZeroFilled:
          _g1->put_free_region_on_list_locked(r);
          break;
      }
    }
    return false;
  }

  int getFreeRegionCount() {return _n;}
};
5117 | |
5118 // Done at the end of full GC. | |
5119 void G1CollectedHeap::rebuild_region_lists() { | |
5120 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5121 // This needs to go at the end of the full GC. | |
5122 RegionResetter rs; | |
5123 heap_region_iterate(&rs); | |
5124 _free_regions = rs.getFreeRegionCount(); | |
5125 // Tell the ZF thread it may have work to do. | |
5126 if (should_zf()) ZF_mon->notify_all(); | |
5127 } | |
5128 | |
5129 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { | |
5130 G1CollectedHeap* _g1; | |
5131 int _n; | |
5132 public: | |
5133 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5134 bool doHeapRegion(HeapRegion* r) { | |
5135 if (r->continuesHumongous()) return false; | |
5136 if (r->top() > r->bottom()) { | |
5137 // There are assertions in "set_zero_fill_needed()" below that | |
5138 // require top() == bottom(), so this is technically illegal. | |
5139 // We'll skirt the law here, by making that true temporarily. | |
5140 DEBUG_ONLY(HeapWord* save_top = r->top(); | |
5141 r->set_top(r->bottom())); | |
5142 r->set_zero_fill_needed(); | |
5143 DEBUG_ONLY(r->set_top(save_top)); | |
5144 } | |
5145 return false; | |
5146 } | |
5147 }; | |
5148 | |
5149 // Done at the start of full GC. | |
5150 void G1CollectedHeap::set_used_regions_to_need_zero_fill() { | |
5151 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5152 // This needs to go at the end of the full GC. | |
5153 UsedRegionsNeedZeroFillSetter rs; | |
5154 heap_region_iterate(&rs); | |
5155 } | |
5156 | |
// Forwards the concurrency flag to the refinement card-table-entry
// closure, telling it whether refinement is running concurrently.
void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  _refine_cte_cl->set_concurrent(concurrent);
}
5160 | |
5161 #ifndef PRODUCT | |
5162 | |
5163 class PrintHeapRegionClosure: public HeapRegionClosure { | |
5164 public: | |
5165 bool doHeapRegion(HeapRegion *r) { | |
5166 gclog_or_tty->print("Region: "PTR_FORMAT":", r); | |
5167 if (r != NULL) { | |
5168 if (r->is_on_free_list()) | |
5169 gclog_or_tty->print("Free "); | |
5170 if (r->is_young()) | |
5171 gclog_or_tty->print("Young "); | |
5172 if (r->isHumongous()) | |
5173 gclog_or_tty->print("Is Humongous "); | |
5174 r->print(); | |
5175 } | |
5176 return false; | |
5177 } | |
5178 }; | |
5179 | |
5180 class SortHeapRegionClosure : public HeapRegionClosure { | |
5181 size_t young_regions,free_regions, unclean_regions; | |
5182 size_t hum_regions, count; | |
5183 size_t unaccounted, cur_unclean, cur_alloc; | |
5184 size_t total_free; | |
5185 HeapRegion* cur; | |
5186 public: | |
5187 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), | |
5188 free_regions(0), unclean_regions(0), | |
5189 hum_regions(0), | |
5190 count(0), unaccounted(0), | |
5191 cur_alloc(0), total_free(0) | |
5192 {} | |
5193 bool doHeapRegion(HeapRegion *r) { | |
5194 count++; | |
5195 if (r->is_on_free_list()) free_regions++; | |
5196 else if (r->is_on_unclean_list()) unclean_regions++; | |
5197 else if (r->isHumongous()) hum_regions++; | |
5198 else if (r->is_young()) young_regions++; | |
5199 else if (r == cur) cur_alloc++; | |
5200 else unaccounted++; | |
5201 return false; | |
5202 } | |
5203 void print() { | |
5204 total_free = free_regions + unclean_regions; | |
5205 gclog_or_tty->print("%d regions\n", count); | |
5206 gclog_or_tty->print("%d free: free_list = %d unclean = %d\n", | |
5207 total_free, free_regions, unclean_regions); | |
5208 gclog_or_tty->print("%d humongous %d young\n", | |
5209 hum_regions, young_regions); | |
5210 gclog_or_tty->print("%d cur_alloc\n", cur_alloc); | |
5211 gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted); | |
5212 } | |
5213 }; | |
5214 | |
5215 void G1CollectedHeap::print_region_counts() { | |
5216 SortHeapRegionClosure sc(_cur_alloc_region); | |
5217 PrintHeapRegionClosure cl; | |
5218 heap_region_iterate(&cl); | |
5219 heap_region_iterate(&sc); | |
5220 sc.print(); | |
5221 print_region_accounting_info(); | |
5222 }; | |
5223 | |
// Debug-only sanity hook; the real accounting checks are not
// implemented yet, so it unconditionally reports success.
bool G1CollectedHeap::regions_accounted_for() {
  // TODO: regions accounting for young/survivor/tenured
  return true;
}
5228 | |
5229 bool G1CollectedHeap::print_region_accounting_info() { | |
5230 gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).", | |
5231 free_regions(), | |
5232 count_free_regions(), count_free_regions_list(), | |
5233 _free_region_list_size, _unclean_region_list.sz()); | |
5234 gclog_or_tty->print_cr("cur_alloc: %d.", | |
5235 (_cur_alloc_region == NULL ? 0 : 1)); | |
5236 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); | |
5237 | |
5238 // TODO: check regions accounting for young/survivor/tenured | |
5239 return true; | |
5240 } | |
5241 | |
5242 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5243 HeapRegion* hr = heap_region_containing(p); | |
5244 if (hr == NULL) { | |
5245 return is_in_permanent(p); | |
5246 } else { | |
5247 return hr->is_in(p); | |
5248 } | |
5249 } | |
941 | 5250 #endif // !PRODUCT |
342 | 5251 |
// Placeholder for CollectedHeap operations G1 does not implement; the
// Unimplemented() trap is currently commented out, so this is a no-op.
void G1CollectedHeap::g1_unimplemented() {
  // Unimplemented();
}