Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 1974:fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
Summary: Enable reference discovery during concurrent marking by setting the reference processor field of the concurrent marking closure. Keep reference objects on the discovered reference lists alive during incremental evacuation pauses until they are processed at the end of concurrent marking.
Reviewed-by: ysr, tonyp
author | johnc |
---|---|
date | Wed, 01 Dec 2010 17:34:02 -0800 |
parents | 631f79e71e90 |
children | d9310331a29c |
rev | line source |
---|---|
342 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "code/icBuffer.hpp" | |
27 #include "gc_implementation/g1/bufferingOopClosure.hpp" | |
28 #include "gc_implementation/g1/concurrentG1Refine.hpp" | |
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" | |
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" | |
31 #include "gc_implementation/g1/concurrentZFThread.hpp" | |
32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" | |
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | |
34 #include "gc_implementation/g1/g1MarkSweep.hpp" | |
35 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | |
36 #include "gc_implementation/g1/g1RemSet.inline.hpp" | |
37 #include "gc_implementation/g1/heapRegionRemSet.hpp" | |
38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | |
39 #include "gc_implementation/g1/vm_operations_g1.hpp" | |
40 #include "gc_implementation/shared/isGCActiveMark.hpp" | |
41 #include "memory/gcLocker.inline.hpp" | |
42 #include "memory/genOopClosures.inline.hpp" | |
43 #include "memory/generationSpec.hpp" | |
44 #include "oops/oop.inline.hpp" | |
45 #include "oops/oop.pcgc.inline.hpp" | |
46 #include "runtime/aprofiler.hpp" | |
47 #include "runtime/vmThread.hpp" | |
342 | 48 |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
49 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
50 |
342 | 51 // turn it on so that the contents of the young list (scan-only / |
52 // to-be-collected) are printed at "strategic" points before / during | |
53 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
54 #define YOUNG_LIST_VERBOSE 0 |
342 | 55 // CURRENT STATUS |
56 // This file is under construction. Search for "FIXME". | |
57 | |
58 // INVARIANTS/NOTES | |
59 // | |
60 // All allocation activity covered by the G1CollectedHeap interface is | |
1973 | 61 // serialized by acquiring the HeapLock. This happens in mem_allocate |
62 // and allocate_new_tlab, which are the "entry" points to the | |
63 // allocation code from the rest of the JVM. (Note that this does not | |
64 // apply to TLAB allocation, which is not part of this interface: it | |
65 // is done by clients of this interface.) | |
342 | 66 |
67 // Local to this file. | |
68 | |
69 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
70 SuspendibleThreadSet* _sts; | |
71 G1RemSet* _g1rs; | |
72 ConcurrentG1Refine* _cg1r; | |
73 bool _concurrent; | |
74 public: | |
75 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
76 G1RemSet* g1rs, | |
77 ConcurrentG1Refine* cg1r) : | |
78 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
79 {} | |
80 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
1705 | 81 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); |
82 // This path is executed by the concurrent refine or mutator threads, | |
83 // concurrently, and so we do not care if card_ptr contains references | |
84 // that point into the collection set. | |
85 assert(!oops_into_cset, "should be"); | |
86 | |
342 | 87 if (_concurrent && _sts->should_yield()) { |
88 // Caller will actually yield. | |
89 return false; | |
90 } | |
91 // Otherwise, we finished successfully; return true. | |
92 return true; | |
93 } | |
94 void set_concurrent(bool b) { _concurrent = b; } | |
95 }; | |
96 | |
97 | |
98 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
99 int _calls; | |
100 G1CollectedHeap* _g1h; | |
101 CardTableModRefBS* _ctbs; | |
102 int _histo[256]; | |
103 public: | |
104 ClearLoggedCardTableEntryClosure() : | |
105 _calls(0) | |
106 { | |
107 _g1h = G1CollectedHeap::heap(); | |
108 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
109 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
110 } | |
111 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
112 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
113 _calls++; | |
114 unsigned char* ujb = (unsigned char*)card_ptr; | |
115 int ind = (int)(*ujb); | |
116 _histo[ind]++; | |
117 *card_ptr = -1; | |
118 } | |
119 return true; | |
120 } | |
121 int calls() { return _calls; } | |
122 void print_histo() { | |
123 gclog_or_tty->print_cr("Card table value histogram:"); | |
124 for (int i = 0; i < 256; i++) { | |
125 if (_histo[i] != 0) { | |
126 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
127 } | |
128 } | |
129 } | |
130 }; | |
131 | |
132 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
133 int _calls; | |
134 G1CollectedHeap* _g1h; | |
135 CardTableModRefBS* _ctbs; | |
136 public: | |
137 RedirtyLoggedCardTableEntryClosure() : | |
138 _calls(0) | |
139 { | |
140 _g1h = G1CollectedHeap::heap(); | |
141 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
142 } | |
143 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
144 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
145 _calls++; | |
146 *card_ptr = 0; | |
147 } | |
148 return true; | |
149 } | |
150 int calls() { return _calls; } | |
151 }; | |
152 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
153 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
154 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
155 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
156 *card_ptr = CardTableModRefBS::dirty_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
157 return true; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
158 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
159 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
160 |
342 | 161 YoungList::YoungList(G1CollectedHeap* g1h) |
162 : _g1h(g1h), _head(NULL), | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
163 _length(0), |
342 | 164 _last_sampled_rs_lengths(0), |
545 | 165 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
342 | 166 { |
167 guarantee( check_list_empty(false), "just making sure..." ); | |
168 } | |
169 | |
170 void YoungList::push_region(HeapRegion *hr) { | |
171 assert(!hr->is_young(), "should not already be young"); | |
172 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
173 | |
174 hr->set_next_young_region(_head); | |
175 _head = hr; | |
176 | |
177 hr->set_young(); | |
178 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
179 ++_length; | |
180 } | |
181 | |
182 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 183 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 184 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
185 | |
186 hr->set_next_young_region(_survivor_head); | |
187 if (_survivor_head == NULL) { | |
545 | 188 _survivor_tail = hr; |
342 | 189 } |
190 _survivor_head = hr; | |
191 | |
192 ++_survivor_length; | |
193 } | |
194 | |
195 void YoungList::empty_list(HeapRegion* list) { | |
196 while (list != NULL) { | |
197 HeapRegion* next = list->get_next_young_region(); | |
198 list->set_next_young_region(NULL); | |
199 list->uninstall_surv_rate_group(); | |
200 list->set_not_young(); | |
201 list = next; | |
202 } | |
203 } | |
204 | |
205 void YoungList::empty_list() { | |
206 assert(check_list_well_formed(), "young list should be well formed"); | |
207 | |
208 empty_list(_head); | |
209 _head = NULL; | |
210 _length = 0; | |
211 | |
212 empty_list(_survivor_head); | |
213 _survivor_head = NULL; | |
545 | 214 _survivor_tail = NULL; |
342 | 215 _survivor_length = 0; |
216 | |
217 _last_sampled_rs_lengths = 0; | |
218 | |
219 assert(check_list_empty(false), "just making sure..."); | |
220 } | |
221 | |
222 bool YoungList::check_list_well_formed() { | |
223 bool ret = true; | |
224 | |
225 size_t length = 0; | |
226 HeapRegion* curr = _head; | |
227 HeapRegion* last = NULL; | |
228 while (curr != NULL) { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
229 if (!curr->is_young()) { |
342 | 230 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
231 "incorrectly tagged (y: %d, surv: %d)", |
342 | 232 curr->bottom(), curr->end(), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
233 curr->is_young(), curr->is_survivor()); |
342 | 234 ret = false; |
235 } | |
236 ++length; | |
237 last = curr; | |
238 curr = curr->get_next_young_region(); | |
239 } | |
240 ret = ret && (length == _length); | |
241 | |
242 if (!ret) { | |
243 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
244 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
245 length, _length); | |
246 } | |
247 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
248 return ret; |
342 | 249 } |
250 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
251 bool YoungList::check_list_empty(bool check_sample) { |
342 | 252 bool ret = true; |
253 | |
254 if (_length != 0) { | |
255 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
256 _length); | |
257 ret = false; | |
258 } | |
259 if (check_sample && _last_sampled_rs_lengths != 0) { | |
260 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
261 ret = false; | |
262 } | |
263 if (_head != NULL) { | |
264 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
265 ret = false; | |
266 } | |
267 if (!ret) { | |
268 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
269 } | |
270 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
271 return ret; |
342 | 272 } |
273 | |
274 void | |
275 YoungList::rs_length_sampling_init() { | |
276 _sampled_rs_lengths = 0; | |
277 _curr = _head; | |
278 } | |
279 | |
280 bool | |
281 YoungList::rs_length_sampling_more() { | |
282 return _curr != NULL; | |
283 } | |
284 | |
285 void | |
286 YoungList::rs_length_sampling_next() { | |
287 assert( _curr != NULL, "invariant" ); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
288 size_t rs_length = _curr->rem_set()->occupied(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
289 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
290 _sampled_rs_lengths += rs_length; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
291 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
292 // The current region may not yet have been added to the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
293 // incremental collection set (it gets added when it is |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
294 // retired as the current allocation region). |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
295 if (_curr->in_collection_set()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
296 // Update the collection set policy information for this region |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
297 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
298 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
299 |
342 | 300 _curr = _curr->get_next_young_region(); |
301 if (_curr == NULL) { | |
302 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
303 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
304 } | |
305 } | |
306 | |
307 void | |
308 YoungList::reset_auxilary_lists() { | |
309 guarantee( is_empty(), "young list should be empty" ); | |
310 assert(check_list_well_formed(), "young list should be well formed"); | |
311 | |
312 // Add survivor regions to SurvRateGroup. | |
313 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
545 | 314 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
315 |
342 | 316 for (HeapRegion* curr = _survivor_head; |
317 curr != NULL; | |
318 curr = curr->get_next_young_region()) { | |
319 _g1h->g1_policy()->set_region_survivors(curr); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
320 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
321 // The region is a non-empty survivor so let's add it to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
322 // the incremental collection set for the next evacuation |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
323 // pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
324 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); |
342 | 325 } |
326 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
327 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
328 _head = _survivor_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
329 _length = _survivor_length; |
342 | 330 if (_survivor_head != NULL) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
331 assert(_survivor_tail != NULL, "cause it shouldn't be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
332 assert(_survivor_length > 0, "invariant"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
333 _survivor_tail->set_next_young_region(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
334 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
335 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
336 // Don't clear the survivor list handles until the start of |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
337 // the next evacuation pause - we need it in order to re-tag |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
338 // the survivor regions from this evacuation pause as 'young' |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
339 // at the start of the next. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
340 |
545 | 341 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
342 | 342 |
343 assert(check_list_well_formed(), "young list should be well formed"); | |
344 } | |
345 | |
346 void YoungList::print() { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
347 HeapRegion* lists[] = {_head, _survivor_head}; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
348 const char* names[] = {"YOUNG", "SURVIVOR"}; |
342 | 349 |
350 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
351 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
352 HeapRegion *curr = lists[list]; | |
353 if (curr == NULL) | |
354 gclog_or_tty->print_cr(" empty"); | |
355 while (curr != NULL) { | |
356 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
357 "age: %4d, y: %d, surv: %d", |
342 | 358 curr->bottom(), curr->end(), |
359 curr->top(), | |
360 curr->prev_top_at_mark_start(), | |
361 curr->next_top_at_mark_start(), | |
362 curr->top_at_conc_mark_count(), | |
363 curr->age_in_surv_rate_group_cond(), | |
364 curr->is_young(), | |
365 curr->is_survivor()); | |
366 curr = curr->get_next_young_region(); | |
367 } | |
368 } | |
369 | |
370 gclog_or_tty->print_cr(""); | |
371 } | |
372 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
373 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
374 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
375 // Claim the right to put the region on the dirty cards region list |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
376 // by installing a self pointer. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
377 HeapRegion* next = hr->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
378 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
379 HeapRegion* res = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
380 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
381 NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
382 if (res == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
383 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
384 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
385 // Put the region to the dirty cards region list. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
386 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
387 next = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
388 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
389 if (next == head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
390 assert(hr->get_next_dirty_cards_region() == hr, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
391 "hr->get_next_dirty_cards_region() != hr"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
392 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
393 // The last region in the list points to itself. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
394 hr->set_next_dirty_cards_region(hr); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
395 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
396 hr->set_next_dirty_cards_region(next); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
397 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
398 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
399 } while (next != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
400 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
401 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
402 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
403 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
404 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
405 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
406 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
407 HeapRegion* hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
408 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
409 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
410 if (head == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
411 return NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
412 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
413 HeapRegion* new_head = head->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
414 if (head == new_head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
415 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
416 new_head = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
417 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
418 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
419 head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
420 } while (hr != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
421 assert(hr != NULL, "invariant"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
422 hr->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
423 return hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
424 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
425 |
342 | 426 void G1CollectedHeap::stop_conc_gc_threads() { |
794 | 427 _cg1r->stop(); |
342 | 428 _czft->stop(); |
429 _cmThread->stop(); | |
430 } | |
431 | |
432 | |
433 void G1CollectedHeap::check_ct_logs_at_safepoint() { | |
434 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
435 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
436 | |
437 // Count the dirty cards at the start. | |
438 CountNonCleanMemRegionClosure count1(this); | |
439 ct_bs->mod_card_iterate(&count1); | |
440 int orig_count = count1.n(); | |
441 | |
442 // First clear the logged cards. | |
443 ClearLoggedCardTableEntryClosure clear; | |
444 dcqs.set_closure(&clear); | |
445 dcqs.apply_closure_to_all_completed_buffers(); | |
446 dcqs.iterate_closure_all_threads(false); | |
447 clear.print_histo(); | |
448 | |
449 // Now ensure that there's no dirty cards. | |
450 CountNonCleanMemRegionClosure count2(this); | |
451 ct_bs->mod_card_iterate(&count2); | |
452 if (count2.n() != 0) { | |
453 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
454 count2.n(), orig_count); | |
455 } | |
456 guarantee(count2.n() == 0, "Card table should be clean."); | |
457 | |
458 RedirtyLoggedCardTableEntryClosure redirty; | |
459 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
460 dcqs.apply_closure_to_all_completed_buffers(); | |
461 dcqs.iterate_closure_all_threads(false); | |
462 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
463 clear.calls(), orig_count); | |
464 guarantee(redirty.calls() == clear.calls(), | |
465 "Or else mechanism is broken."); | |
466 | |
467 CountNonCleanMemRegionClosure count3(this); | |
468 ct_bs->mod_card_iterate(&count3); | |
469 if (count3.n() != orig_count) { | |
470 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
471 orig_count, count3.n()); | |
472 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
473 } | |
474 | |
475 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
476 } | |
477 | |
478 // Private class members. | |
479 | |
480 G1CollectedHeap* G1CollectedHeap::_g1h; | |
481 | |
482 // Private methods. | |
483 | |
484 // Finds a HeapRegion that can be used to allocate a given size of block. | |
485 | |
486 | |
487 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, | |
488 bool do_expand, | |
489 bool zero_filled) { | |
490 ConcurrentZFThread::note_region_alloc(); | |
491 HeapRegion* res = alloc_free_region_from_lists(zero_filled); | |
492 if (res == NULL && do_expand) { | |
493 expand(word_size * HeapWordSize); | |
494 res = alloc_free_region_from_lists(zero_filled); | |
495 assert(res == NULL || | |
496 (!res->isHumongous() && | |
497 (!zero_filled || | |
498 res->zero_fill_state() == HeapRegion::Allocated)), | |
499 "Alloc Regions must be zero filled (and non-H)"); | |
500 } | |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
501 if (res != NULL) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
502 if (res->is_empty()) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
503 _free_regions--; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
504 } |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
505 assert(!res->isHumongous() && |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
506 (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated), |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
507 err_msg("Non-young alloc Regions must be zero filled (and non-H):" |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
508 " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d", |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
509 res->isHumongous(), zero_filled, res->zero_fill_state())); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
510 assert(!res->is_on_unclean_list(), |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
511 "Alloc Regions must not be on the unclean list"); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
512 if (G1PrintHeapRegions) { |
342 | 513 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " |
514 "top "PTR_FORMAT, | |
515 res->hrs_index(), res->bottom(), res->end(), res->top()); | |
516 } | |
517 } | |
518 return res; | |
519 } | |
520 | |
521 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, | |
522 size_t word_size, | |
523 bool zero_filled) { | |
524 HeapRegion* alloc_region = NULL; | |
525 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
526 alloc_region = newAllocRegion_work(word_size, true, zero_filled); | |
527 if (purpose == GCAllocForSurvived && alloc_region != NULL) { | |
545 | 528 alloc_region->set_survivor(); |
342 | 529 } |
530 ++_gc_alloc_region_counts[purpose]; | |
531 } else { | |
532 g1_policy()->note_alloc_region_limit_reached(purpose); | |
533 } | |
534 return alloc_region; | |
535 } | |
536 | |
537 // If could fit into free regions w/o expansion, try. | |
538 // Otherwise, if can expand, do so. | |
539 // Otherwise, if using ex regions might help, try with ex given back. | |
1973 | 540 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { |
541 assert_heap_locked_or_at_safepoint(); | |
342 | 542 assert(regions_accounted_for(), "Region leakage!"); |
543 | |
1973 | 544 // We can't allocate humongous regions while cleanupComplete is |
545 // running, since some of the regions we find to be empty might not | |
546 // yet be added to the unclean list. If we're already at a | |
547 // safepoint, this call is unnecessary, not to mention wrong. | |
548 if (!SafepointSynchronize::is_at_safepoint()) { | |
342 | 549 wait_for_cleanup_complete(); |
1973 | 550 } |
342 | 551 |
552 size_t num_regions = | |
1973 | 553 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; |
342 | 554 |
555 // Special case if < one region??? | |
556 | |
557 // Remember the ft size. | |
558 size_t x_size = expansion_regions(); | |
559 | |
560 HeapWord* res = NULL; | |
561 bool eliminated_allocated_from_lists = false; | |
562 | |
563 // Can the allocation potentially fit in the free regions? | |
564 if (free_regions() >= num_regions) { | |
565 res = _hrs->obj_allocate(word_size); | |
566 } | |
567 if (res == NULL) { | |
568 // Try expansion. | |
569 size_t fs = _hrs->free_suffix(); | |
570 if (fs + x_size >= num_regions) { | |
571 expand((num_regions - fs) * HeapRegion::GrainBytes); | |
572 res = _hrs->obj_allocate(word_size); | |
573 assert(res != NULL, "This should have worked."); | |
574 } else { | |
575 // Expansion won't help. Are there enough free regions if we get rid | |
576 // of reservations? | |
577 size_t avail = free_regions(); | |
578 if (avail >= num_regions) { | |
579 res = _hrs->obj_allocate(word_size); | |
580 if (res != NULL) { | |
581 remove_allocated_regions_from_lists(); | |
582 eliminated_allocated_from_lists = true; | |
583 } | |
584 } | |
585 } | |
586 } | |
587 if (res != NULL) { | |
588 // Increment by the number of regions allocated. | |
589 // FIXME: Assumes regions all of size GrainBytes. | |
590 #ifndef PRODUCT | |
591 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * | |
592 HeapRegion::GrainWords)); | |
593 #endif | |
594 if (!eliminated_allocated_from_lists) | |
595 remove_allocated_regions_from_lists(); | |
596 _summary_bytes_used += word_size * HeapWordSize; | |
597 _free_regions -= num_regions; | |
598 _num_humongous_regions += (int) num_regions; | |
599 } | |
600 assert(regions_accounted_for(), "Region Leakage"); | |
601 return res; | |
602 } | |
603 | |
1973 | 604 void |
605 G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) { | |
606 // The cleanup operation might update _summary_bytes_used | |
607 // concurrently with this method. So, right now, if we don't wait | |
608 // for it to complete, updates to _summary_bytes_used might get | |
609 // lost. This will be resolved in the near future when the operation | |
610 // of the free region list is revamped as part of CR 6977804. | |
611 wait_for_cleanup_complete(); | |
612 | |
613 retire_cur_alloc_region_common(cur_alloc_region); | |
614 assert(_cur_alloc_region == NULL, "post-condition"); | |
615 } | |
616 | |
617 // See the comment in the .hpp file about the locking protocol and | |
618 // assumptions of this method (and other related ones). | |
342 | 619 HeapWord* |
1973 | 620 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size, |
621 bool at_safepoint, | |
622 bool do_dirtying) { | |
623 assert_heap_locked_or_at_safepoint(); | |
624 assert(_cur_alloc_region == NULL, | |
625 "replace_cur_alloc_region_and_allocate() should only be called " | |
626 "after retiring the previous current alloc region"); | |
627 assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, | |
628 "at_safepoint and is_at_safepoint() should be a tautology"); | |
629 | |
630 if (!g1_policy()->is_young_list_full()) { | |
631 if (!at_safepoint) { | |
632 // The cleanup operation might update _summary_bytes_used | |
633 // concurrently with this method. So, right now, if we don't | |
634 // wait for it to complete, updates to _summary_bytes_used might | |
635 // get lost. This will be resolved in the near future when the | |
636 // operation of the free region list is revamped as part of | |
637 // CR 6977804. If we're already at a safepoint, this call is | |
638 // unnecessary, not to mention wrong. | |
354
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
639 wait_for_cleanup_complete(); |
342 | 640 } |
1973 | 641 |
642 HeapRegion* new_cur_alloc_region = newAllocRegion(word_size, | |
643 false /* zero_filled */); | |
644 if (new_cur_alloc_region != NULL) { | |
645 assert(new_cur_alloc_region->is_empty(), | |
646 "the newly-allocated region should be empty, " | |
647 "as right now we only allocate new regions out of the free list"); | |
648 g1_policy()->update_region_num(true /* next_is_young */); | |
649 _summary_bytes_used -= new_cur_alloc_region->used(); | |
650 set_region_short_lived_locked(new_cur_alloc_region); | |
651 | |
652 assert(!new_cur_alloc_region->isHumongous(), | |
653 "Catch a regression of this bug."); | |
654 | |
655 // We need to ensure that the stores to _cur_alloc_region and, | |
656 // subsequently, to top do not float above the setting of the | |
657 // young type. | |
658 OrderAccess::storestore(); | |
659 | |
660 // Now allocate out of the new current alloc region. We could | |
661 // have re-used allocate_from_cur_alloc_region() but its | |
662 // operation is slightly different to what we need here. First, | |
663 // allocate_from_cur_alloc_region() is only called outside a | |
664 // safepoint and will always unlock the Heap_lock if it returns | |
665 // a non-NULL result. Second, it assumes that the current alloc | |
666 // region is what's already assigned in _cur_alloc_region. What | |
667 // we want here is to actually do the allocation first before we | |
668 // assign the new region to _cur_alloc_region. This ordering is | |
669 // not currently important, but it will be essential when we | |
670 // change the code to support CAS allocation in the future (see | |
671 // CR 6994297). | |
672 // | |
673 // This allocate method does BOT updates and we don't need them in | |
674 // the young generation. This will be fixed in the near future by | |
675 // CR 6994297. | |
676 HeapWord* result = new_cur_alloc_region->allocate(word_size); | |
677 assert(result != NULL, "we just allocate out of an empty region " | |
678 "so allocation should have been successful"); | |
679 assert(is_in(result), "result should be in the heap"); | |
680 | |
681 _cur_alloc_region = new_cur_alloc_region; | |
682 | |
683 if (!at_safepoint) { | |
684 Heap_lock->unlock(); | |
685 } | |
686 | |
687 // do the dirtying, if necessary, after we release the Heap_lock | |
688 if (do_dirtying) { | |
689 dirty_young_block(result, word_size); | |
690 } | |
691 return result; | |
692 } | |
693 } | |
694 | |
695 assert(_cur_alloc_region == NULL, "we failed to allocate a new current " | |
696 "alloc region, it should still be NULL"); | |
697 assert_heap_locked_or_at_safepoint(); | |
698 return NULL; | |
699 } | |
700 | |
701 // See the comment in the .hpp file about the locking protocol and | |
702 // assumptions of this method (and other related ones). | |
703 HeapWord* | |
704 G1CollectedHeap::attempt_allocation_slow(size_t word_size) { | |
705 assert_heap_locked_and_not_at_safepoint(); | |
706 assert(!isHumongous(word_size), "attempt_allocation_slow() should not be " | |
707 "used for humongous allocations"); | |
708 | |
709 // We will loop while succeeded is false, which means that we tried | |
710 // to do a collection, but the VM op did not succeed. So, when we | |
711 // exit the loop, either one of the allocation attempts was | |
712 // successful, or we succeeded in doing the VM op but which was | |
713 // unable to allocate after the collection. | |
714 for (int try_count = 1; /* we'll return or break */; try_count += 1) { | |
715 bool succeeded = true; | |
716 | |
717 { | |
718 // We may have concurrent cleanup working at the time. Wait for | |
719 // it to complete. In the future we would probably want to make | |
720 // the concurrent cleanup truly concurrent by decoupling it from | |
721 // the allocation. This will happen in the near future as part | |
722 // of CR 6977804 which will revamp the operation of the free | |
723 // region list. The fact that wait_for_cleanup_complete() will | |
724 // do a wait() means that we'll give up the Heap_lock. So, it's | |
725 // possible that when we exit wait_for_cleanup_complete() we | |
726 // might be able to allocate successfully (since somebody else | |
727 // might have done a collection meanwhile). So, we'll attempt to | |
728 // allocate again, just in case. When we make cleanup truly | |
729 // concurrent with allocation, we should remove this allocation | |
730 // attempt as it's redundant (we only reach here after an | |
731 // allocation attempt has been unsuccessful). | |
732 wait_for_cleanup_complete(); | |
733 HeapWord* result = attempt_allocation(word_size); | |
734 if (result != NULL) { | |
735 assert_heap_not_locked(); | |
736 return result; | |
342 | 737 } |
738 } | |
1973 | 739 |
740 if (GC_locker::is_active_and_needs_gc()) { | |
741 // We are locked out of GC because of the GC locker. Right now, | |
742 // we'll just stall until the GC locker-induced GC | |
743 // completes. This will be fixed in the near future by extending | |
744 // the eden while waiting for the GC locker to schedule the GC | |
745 // (see CR 6994056). | |
746 | |
747 // If this thread is not in a jni critical section, we stall | |
748 // the requestor until the critical section has cleared and | |
749 // GC allowed. When the critical section clears, a GC is | |
750 // initiated by the last thread exiting the critical section; so | |
751 // we retry the allocation sequence from the beginning of the loop, | |
752 // rather than causing more, now probably unnecessary, GC attempts. | |
753 JavaThread* jthr = JavaThread::current(); | |
754 assert(jthr != NULL, "sanity"); | |
755 if (!jthr->in_critical()) { | |
756 MutexUnlocker mul(Heap_lock); | |
757 GC_locker::stall_until_clear(); | |
758 | |
759 // We'll then fall off the end of the ("if GC locker active") | |
760 // if-statement and retry the allocation further down in the | |
761 // loop. | |
762 } else { | |
763 if (CheckJNICalls) { | |
764 fatal("Possible deadlock due to allocating while" | |
765 " in jni critical section"); | |
766 } | |
767 return NULL; | |
1666
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
768 } |
1973 | 769 } else { |
770 // We are not locked out. So, let's try to do a GC. The VM op | |
771 // will retry the allocation before it completes. | |
772 | |
773 // Read the GC count while holding the Heap_lock | |
774 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); | |
775 | |
776 Heap_lock->unlock(); | |
777 | |
778 HeapWord* result = | |
779 do_collection_pause(word_size, gc_count_before, &succeeded); | |
780 assert_heap_not_locked(); | |
781 if (result != NULL) { | |
782 assert(succeeded, "the VM op should have succeeded"); | |
783 | |
784 // Allocations that take place on VM operations do not do any | |
785 // card dirtying and we have to do it here. | |
786 dirty_young_block(result, word_size); | |
787 return result; | |
788 } | |
789 | |
790 Heap_lock->lock(); | |
791 } | |
792 | |
793 assert_heap_locked(); | |
794 | |
795 // We can reach here when we were unsuccessful in doing a GC, | |
796 // because another thread beat us to it, or because we were locked | |
797 // out of GC due to the GC locker. In either case a new alloc | |
798 // region might be available so we will retry the allocation. | |
799 HeapWord* result = attempt_allocation(word_size); | |
800 if (result != NULL) { | |
801 assert_heap_not_locked(); | |
802 return result; | |
803 } | |
804 | |
805 // So far our attempts to allocate failed. The only time we'll go | |
806 // around the loop and try again is if we tried to do a GC and the | |
807 // VM op that we tried to schedule was not successful because | |
808 // another thread beat us to it. If that happened it's possible | |
809 // that by the time we grabbed the Heap_lock again and tried to | |
810 // allocate other threads filled up the young generation, which | |
811 // means that the allocation attempt after the GC also failed. So, | |
812 // it's worth trying to schedule another GC pause. | |
813 if (succeeded) { | |
814 break; | |
815 } | |
816 | |
817 // Give a warning if we seem to be looping forever. | |
818 if ((QueuedAllocationWarningCount > 0) && | |
819 (try_count % QueuedAllocationWarningCount == 0)) { | |
820 warning("G1CollectedHeap::attempt_allocation_slow() " | |
821 "retries %d times", try_count); | |
342 | 822 } |
823 } | |
824 | |
1973 | 825 assert_heap_locked(); |
826 return NULL; | |
827 } | |
828 | |
829 // See the comment in the .hpp file about the locking protocol and | |
830 // assumptions of this method (and other related ones). | |
831 HeapWord* | |
832 G1CollectedHeap::attempt_allocation_humongous(size_t word_size, | |
833 bool at_safepoint) { | |
834 // This is the method that will allocate a humongous object. All | |
835 // allocation paths that attempt to allocate a humongous object | |
836 // should eventually reach here. Currently, the only paths are from | |
837 // mem_allocate() and attempt_allocation_at_safepoint(). | |
838 assert_heap_locked_or_at_safepoint(); | |
839 assert(isHumongous(word_size), "attempt_allocation_humongous() " | |
840 "should only be used for humongous allocations"); | |
841 assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, | |
842 "at_safepoint and is_at_safepoint() should be a tautology"); | |
843 | |
844 HeapWord* result = NULL; | |
845 | |
846 // We will loop while succeeded is false, which means that we tried | |
847 // to do a collection, but the VM op did not succeed. So, when we | |
848 // exit the loop, either one of the allocation attempts was | |
849 // successful, or we succeeded in doing the VM op but which was | |
850 // unable to allocate after the collection. | |
851 for (int try_count = 1; /* we'll return or break */; try_count += 1) { | |
852 bool succeeded = true; | |
853 | |
854 // Given that humongous objects are not allocated in young | |
855 // regions, we'll first try to do the allocation without doing a | |
856 // collection hoping that there's enough space in the heap. | |
857 result = humongous_obj_allocate(word_size); | |
858 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), | |
859 "catch a regression of this bug."); | |
860 if (result != NULL) { | |
861 if (!at_safepoint) { | |
862 // If we're not at a safepoint, unlock the Heap_lock. | |
863 Heap_lock->unlock(); | |
864 } | |
865 return result; | |
866 } | |
867 | |
868 // If we failed to allocate the humongous object, we should try to | |
869 // do a collection pause (if we're allowed) in case it reclaims | |
870 // enough space for the allocation to succeed after the pause. | |
871 if (!at_safepoint) { | |
872 // Read the GC count while holding the Heap_lock | |
873 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); | |
874 | |
875 // If we're allowed to do a collection we're not at a | |
876 // safepoint, so it is safe to unlock the Heap_lock. | |
342 | 877 Heap_lock->unlock(); |
1973 | 878 |
879 result = do_collection_pause(word_size, gc_count_before, &succeeded); | |
880 assert_heap_not_locked(); | |
881 if (result != NULL) { | |
882 assert(succeeded, "the VM op should have succeeded"); | |
883 return result; | |
884 } | |
885 | |
886 // If we get here, the VM operation either did not succeed | |
887 // (i.e., another thread beat us to it) or it succeeded but | |
888 // failed to allocate the object. | |
889 | |
890 // If we're allowed to do a collection we're not at a | |
891 // safepoint, so it is safe to lock the Heap_lock. | |
892 Heap_lock->lock(); | |
893 } | |
894 | |
895 assert(result == NULL, "otherwise we should have exited the loop earlier"); | |
896 | |
897 // So far our attempts to allocate failed. The only time we'll go | |
898 // around the loop and try again is if we tried to do a GC and the | |
899 // VM op that we tried to schedule was not successful because | |
900 // another thread beat us to it. That way it's possible that some | |
901 // space was freed up by the thread that successfully scheduled a | |
902 // GC. So it's worth trying to allocate again. | |
903 if (succeeded) { | |
904 break; | |
342 | 905 } |
906 | |
1973 | 907 // Give a warning if we seem to be looping forever. |
908 if ((QueuedAllocationWarningCount > 0) && | |
909 (try_count % QueuedAllocationWarningCount == 0)) { | |
910 warning("G1CollectedHeap::attempt_allocation_humongous " | |
911 "retries %d times", try_count); | |
912 } | |
913 } | |
914 | |
915 assert_heap_locked_or_at_safepoint(); | |
916 return NULL; | |
917 } | |
918 | |
919 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, | |
920 bool expect_null_cur_alloc_region) { | |
921 assert_at_safepoint(); | |
922 assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region, | |
923 "The current alloc region should only be non-NULL if we're " | |
924 "expecting it not to be NULL"); | |
925 | |
926 if (!isHumongous(word_size)) { | |
927 if (!expect_null_cur_alloc_region) { | |
928 HeapRegion* cur_alloc_region = _cur_alloc_region; | |
929 if (cur_alloc_region != NULL) { | |
930 // This allocate method does BOT updates and we don't need them in | |
931 // the young generation. This will be fixed in the near future by | |
932 // CR 6994297. | |
933 HeapWord* result = cur_alloc_region->allocate(word_size); | |
934 if (result != NULL) { | |
935 assert(is_in(result), "result should be in the heap"); | |
936 | |
937 // We will not do any dirtying here. This is guaranteed to be | |
938 // called during a safepoint and the thread that scheduled the | |
939 // pause will do the dirtying if we return a non-NULL result. | |
940 return result; | |
941 } | |
942 | |
943 retire_cur_alloc_region_common(cur_alloc_region); | |
944 } | |
342 | 945 } |
1973 | 946 |
947 assert(_cur_alloc_region == NULL, | |
948 "at this point we should have no cur alloc region"); | |
949 return replace_cur_alloc_region_and_allocate(word_size, | |
950 true, /* at_safepoint */ | |
951 false /* do_dirtying */); | |
952 } else { | |
953 return attempt_allocation_humongous(word_size, | |
954 true /* at_safepoint */); | |
955 } | |
956 | |
957 ShouldNotReachHere(); | |
958 } | |
959 | |
960 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { | |
961 assert_heap_not_locked_and_not_at_safepoint(); | |
962 assert(!isHumongous(word_size), "we do not allow TLABs of humongous size"); | |
963 | |
964 Heap_lock->lock(); | |
965 | |
966 // First attempt: try allocating out of the current alloc region or | |
967 // after replacing the current alloc region. | |
968 HeapWord* result = attempt_allocation(word_size); | |
969 if (result != NULL) { | |
970 assert_heap_not_locked(); | |
971 return result; | |
972 } | |
973 | |
974 assert_heap_locked(); | |
975 | |
976 // Second attempt: go into the even slower path where we might | |
977 // try to schedule a collection. | |
978 result = attempt_allocation_slow(word_size); | |
979 if (result != NULL) { | |
980 assert_heap_not_locked(); | |
981 return result; | |
982 } | |
983 | |
984 assert_heap_locked(); | |
985 Heap_lock->unlock(); | |
986 return NULL; | |
342 | 987 } |
988 | |
989 HeapWord* | |
990 G1CollectedHeap::mem_allocate(size_t word_size, | |
991 bool is_noref, | |
992 bool is_tlab, | |
1973 | 993 bool* gc_overhead_limit_was_exceeded) { |
994 assert_heap_not_locked_and_not_at_safepoint(); | |
995 assert(!is_tlab, "mem_allocate() this should not be called directly " | |
996 "to allocate TLABs"); | |
342 | 997 |
998 // Loop until the allocation is satisified, | |
999 // or unsatisfied after GC. | |
1973 | 1000 for (int try_count = 1; /* we'll return */; try_count += 1) { |
1001 unsigned int gc_count_before; | |
342 | 1002 { |
1003 Heap_lock->lock(); | |
1973 | 1004 |
1005 if (!isHumongous(word_size)) { | |
1006 // First attempt: try allocating out of the current alloc | |
1007 // region or after replacing the current alloc region. | |
1008 HeapWord* result = attempt_allocation(word_size); | |
1009 if (result != NULL) { | |
1010 assert_heap_not_locked(); | |
1011 return result; | |
1012 } | |
1013 | |
1014 assert_heap_locked(); | |
1015 | |
1016 // Second attempt: go into the even slower path where we might | |
1017 // try to schedule a collection. | |
1018 result = attempt_allocation_slow(word_size); | |
1019 if (result != NULL) { | |
1020 assert_heap_not_locked(); | |
1021 return result; | |
1022 } | |
1023 } else { | |
1024 HeapWord* result = attempt_allocation_humongous(word_size, | |
1025 false /* at_safepoint */); | |
1026 if (result != NULL) { | |
1027 assert_heap_not_locked(); | |
1028 return result; | |
1029 } | |
342 | 1030 } |
1973 | 1031 |
1032 assert_heap_locked(); | |
342 | 1033 // Read the gc count while the heap lock is held. |
1034 gc_count_before = SharedHeap::heap()->total_collections(); | |
1973 | 1035 // We cannot be at a safepoint, so it is safe to unlock the Heap_lock |
342 | 1036 Heap_lock->unlock(); |
1037 } | |
1038 | |
1039 // Create the garbage collection operation... | |
1973 | 1040 VM_G1CollectForAllocation op(gc_count_before, word_size); |
342 | 1041 // ...and get the VM thread to execute it. |
1042 VMThread::execute(&op); | |
1973 | 1043 |
1044 assert_heap_not_locked(); | |
1045 if (op.prologue_succeeded() && op.pause_succeeded()) { | |
1046 // If the operation was successful we'll return the result even | |
1047 // if it is NULL. If the allocation attempt failed immediately | |
1048 // after a Full GC, it's unlikely we'll be able to allocate now. | |
1049 HeapWord* result = op.result(); | |
1050 if (result != NULL && !isHumongous(word_size)) { | |
1051 // Allocations that take place on VM operations do not do any | |
1052 // card dirtying and we have to do it here. We only have to do | |
1053 // this for non-humongous allocations, though. | |
1054 dirty_young_block(result, word_size); | |
1055 } | |
342 | 1056 return result; |
1973 | 1057 } else { |
1058 assert(op.result() == NULL, | |
1059 "the result should be NULL if the VM op did not succeed"); | |
342 | 1060 } |
1061 | |
1062 // Give a warning if we seem to be looping forever. | |
1063 if ((QueuedAllocationWarningCount > 0) && | |
1064 (try_count % QueuedAllocationWarningCount == 0)) { | |
1973 | 1065 warning("G1CollectedHeap::mem_allocate retries %d times", try_count); |
342 | 1066 } |
1067 } | |
1973 | 1068 |
1069 ShouldNotReachHere(); | |
342 | 1070 } |
1071 | |
1072 void G1CollectedHeap::abandon_cur_alloc_region() { | |
1073 if (_cur_alloc_region != NULL) { | |
1074 // We're finished with the _cur_alloc_region. | |
1075 if (_cur_alloc_region->is_empty()) { | |
1076 _free_regions++; | |
1077 free_region(_cur_alloc_region); | |
1078 } else { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1079 // As we're builing (at least the young portion) of the collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1080 // set incrementally we'll add the current allocation region to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1081 // the collection set here. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1082 if (_cur_alloc_region->is_young()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1083 g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1084 } |
342 | 1085 _summary_bytes_used += _cur_alloc_region->used(); |
1086 } | |
1087 _cur_alloc_region = NULL; | |
1088 } | |
1089 } | |
1090 | |
636 | 1091 void G1CollectedHeap::abandon_gc_alloc_regions() { |
1092 // first, make sure that the GC alloc region list is empty (it should!) | |
1093 assert(_gc_alloc_region_list == NULL, "invariant"); | |
1094 release_gc_alloc_regions(true /* totally */); | |
1095 } | |
1096 | |
342 | 1097 class PostMCRemSetClearClosure: public HeapRegionClosure { |
1098 ModRefBarrierSet* _mr_bs; | |
1099 public: | |
1100 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1101 bool doHeapRegion(HeapRegion* r) { | |
1102 r->reset_gc_time_stamp(); | |
1103 if (r->continuesHumongous()) | |
1104 return false; | |
1105 HeapRegionRemSet* hrrs = r->rem_set(); | |
1106 if (hrrs != NULL) hrrs->clear(); | |
1107 // You might think here that we could clear just the cards | |
1108 // corresponding to the used region. But no: if we leave a dirty card | |
1109 // in a region we might allocate into, then it would prevent that card | |
1110 // from being enqueued, and cause it to be missed. | |
1111 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
1112 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
1113 return false; | |
1114 } | |
1115 }; | |
1116 | |
1117 | |
1118 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
1119 ModRefBarrierSet* _mr_bs; | |
1120 public: | |
1121 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1122 bool doHeapRegion(HeapRegion* r) { | |
1123 if (r->continuesHumongous()) return false; | |
1124 if (r->used_region().word_size() != 0) { | |
1125 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
1126 } | |
1127 return false; | |
1128 } | |
1129 }; | |
1130 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1131 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1132 G1CollectedHeap* _g1h; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1133 UpdateRSOopClosure _cl; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1134 int _worker_i; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1135 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1136 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
1861 | 1137 _cl(g1->g1_rem_set(), worker_i), |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1138 _worker_i(worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1139 _g1h(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1140 { } |
1960
878b57474103
6978187: G1: assert(ParallelGCThreads> 1 || n_yielded() == _hrrs->occupied()) strikes again
johnc
parents:
1883
diff
changeset
|
1141 |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1142 bool doHeapRegion(HeapRegion* r) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1143 if (!r->continuesHumongous()) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1144 _cl.set_from(r); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1145 r->oop_iterate(&_cl); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1146 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1147 return false; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1148 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1149 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1150 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1151 class ParRebuildRSTask: public AbstractGangTask { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1152 G1CollectedHeap* _g1; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1153 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1154 ParRebuildRSTask(G1CollectedHeap* g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1155 : AbstractGangTask("ParRebuildRSTask"), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1156 _g1(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1157 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1158 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1159 void work(int i) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1160 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1161 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1162 HeapRegion::RebuildRSClaimValue); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1163 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1164 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1165 |
// Performs a full (stop-the-world, mark-compact) collection.
// Returns false if the collection was locked out by the GC locker,
// true otherwise.  Must be called at a safepoint by the VM thread.
// NOTE(review): the statement ordering below is load-bearing
// (ref-processor state, region-list teardown/rebuild, claim values);
// do not reorder without understanding the G1 full-GC protocol.
bool G1CollectedHeap::do_collection(bool explicit_gc,
                                    bool clear_all_soft_refs,
                                    size_t word_size) {
  if (GC_locker::check_active_before_gc()) {
    return false;  // GC locker is active; bail out without collecting
  }

  ResourceMark rm;

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");

  // Clearing soft refs may also be forced by policy (e.g. when close
  // to an OOM due to GC overhead).
  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  {
    IsGCActiveMark x;

    // Timing
    bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
    assert(!system_gc || explicit_gc, "invariant");
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
                PrintGC, true, gclog_or_tty);

    TraceMemoryManagerStats tms(true /* fullGC */);

    double start = os::elapsedTime();
    g1_policy()->record_full_collection_start();

    gc_prologue(true);
    increment_total_collections(true /* full gc */);

    size_t g1h_prev_used = used();
    assert(used() == recalculate_used(), "Should be equal");

    if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      prepare_for_verify();
      gclog_or_tty->print(" VerifyBeforeGC:");
      Universe::verify(true);
    }
    assert(regions_accounted_for(), "Region leakage!");

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    // We want to discover references, but not process them yet.
    // This mode is disabled in
    // instanceRefKlass::process_discovered_references if the
    // generation does some collection work, or
    // instanceRefKlass::enqueue_discovered_references if the
    // generation returns without doing any work.
    ref_processor()->disable_discovery();
    ref_processor()->abandon_partial_discovery();
    ref_processor()->verify_no_references_recorded();

    // Abandon current iterations of concurrent marking and concurrent
    // refinement, if any are in progress.
    concurrent_mark()->abort();

    // Make sure we'll choose a new allocation region afterwards.
    abandon_cur_alloc_region();
    abandon_gc_alloc_regions();
    assert(_cur_alloc_region == NULL, "Invariant.");
    g1_rem_set()->cleanupHRRS();
    tear_down_region_lists();
    set_used_regions_to_need_zero_fill();

    // We may have added regions to the current incremental collection
    // set between the last GC or pause and now. We need to clear the
    // incremental collection set and then start rebuilding it afresh
    // after this full GC.
    abandon_collection_set(g1_policy()->inc_cset_head());
    g1_policy()->clear_incremental_cset();
    g1_policy()->stop_incremental_cset_building();

    if (g1_policy()->in_young_gc_mode()) {
      empty_young_list();
      g1_policy()->set_full_young_gcs(true);
    }

    // See the comment in G1CollectedHeap::ref_processing_init() about
    // how reference processing currently works in G1.

    // Temporarily make reference _discovery_ single threaded (non-MT).
    ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);

    // Temporarily make refs discovery atomic
    ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);

    // Temporarily clear _is_alive_non_header
    ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(do_clear_all_soft_refs);

    // Do collection work
    {
      HandleMark hm;  // Discard invalid handles created during gc
      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
    }
    // Because freeing humongous regions may have added some unclean
    // regions, it is necessary to tear down again before rebuilding.
    tear_down_region_lists();
    rebuild_region_lists();

    _summary_bytes_used = recalculate_used();

    ref_processor()->enqueue_discovered_references();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    MemoryService::track_memory_usage();

    if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      gclog_or_tty->print(" VerifyAfterGC:");
      prepare_for_verify();
      Universe::verify(false);
    }
    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

    reset_gc_time_stamp();
    // Since everything potentially moved, we will clear all remembered
    // sets, and clear all cards.  Later we will rebuild remebered
    // sets. We will also reset the GC time stamps of the regions.
    PostMCRemSetClearClosure rs_clear(mr_bs());
    heap_region_iterate(&rs_clear);

    // Resize the heap if necessary.
    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);

    if (_cg1r->use_cache()) {
      _cg1r->clear_and_record_card_counts();
      _cg1r->clear_hot_cache();
    }

    // Rebuild remembered sets of all regions.

    if (G1CollectedHeap::use_parallel_gc_threads()) {
      ParRebuildRSTask rebuild_rs_task(this);
      assert(check_heap_region_claim_values(
             HeapRegion::InitialClaimValue), "sanity check");
      set_par_threads(workers()->total_workers());
      workers()->run_task(&rebuild_rs_task);
      set_par_threads(0);
      assert(check_heap_region_claim_values(
             HeapRegion::RebuildRSClaimValue), "sanity check");
      reset_heap_region_claim_values();
    } else {
      RebuildRSOutOfRegionClosure rebuild_rs(this);
      heap_region_iterate(&rebuild_rs);
    }

    if (PrintGC) {
      print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
    }

    if (true) { // FIXME
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
    }

    // Start a new incremental collection set for the next pause
    assert(g1_policy()->collection_set() == NULL, "must be");
    g1_policy()->start_incremental_cset_building();

    // Clear the _cset_fast_test bitmap in anticipation of adding
    // regions to the incremental collection set for the next
    // evacuation pause.
    clear_cset_fast_test();

    double end = os::elapsedTime();
    g1_policy()->record_full_collection_end();

#ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
#endif

    gc_epilogue(true);

    // Discard all rset updates
    JavaThread::dirty_card_queue_set().abandon_logs();
    assert(!G1DeferredRSUpdate
           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
    assert(regions_accounted_for(), "Region leakage!");
  }

  if (g1_policy()->in_young_gc_mode()) {
    _young_list->reset_sampled_info();
    // At this point there should be no regions in the
    // entire heap tagged as young.
    assert( check_young_list_empty(true /* check_heap */),
            "young list should be empty at this point");
  }

  // Update the number of full collections that have been completed.
  increment_full_collections_completed(false /* outer */);

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  return true;
}
1378 | |
1379 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1973 | 1380 // do_collection() will return whether it succeeded in performing |
1381 // the GC. Currently, there is no facility on the | |
1382 // do_full_collection() API to notify the caller than the collection | |
1383 // did not succeed (e.g., because it was locked out by the GC | |
1384 // locker). So, right now, we'll ignore the return value. | |
1385 bool dummy = do_collection(true, /* explicit_gc */ | |
1386 clear_all_soft_refs, | |
1387 0 /* word_size */); | |
342 | 1388 } |
1389 | |
1390 // This code is mostly copied from TenuredGeneration. | |
1391 void | |
1392 G1CollectedHeap:: | |
1393 resize_if_necessary_after_full_collection(size_t word_size) { | |
1394 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1395 | |
1396 // Include the current allocation, if any, and bytes that will be | |
1397 // pre-allocated to support collections, as "used". | |
1398 const size_t used_after_gc = used(); | |
1399 const size_t capacity_after_gc = capacity(); | |
1400 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1401 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1402 // This is enforced in arguments.cpp. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1403 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1404 "otherwise the code below doesn't make sense"); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1405 |
342 | 1406 // We don't have floating point command-line arguments |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1407 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; |
342 | 1408 const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1409 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; |
342 | 1410 const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
1411 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1412 const size_t min_heap_size = collector_policy()->min_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1413 const size_t max_heap_size = collector_policy()->max_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1414 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1415 // We have to be careful here as these two calculations can overflow |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1416 // 32-bit size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1417 double used_after_gc_d = (double) used_after_gc; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1418 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1419 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1420 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1421 // Let's make sure that they are both under the max heap size, which |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1422 // by default will make them fit into a size_t. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1423 double desired_capacity_upper_bound = (double) max_heap_size; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1424 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1425 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1426 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1427 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1428 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1429 // We can now safely turn them into size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1430 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1431 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1432 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1433 // This assert only makes sense here, before we adjust them |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1434 // with respect to the min and max heap size. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1435 assert(minimum_desired_capacity <= maximum_desired_capacity, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1436 err_msg("minimum_desired_capacity = "SIZE_FORMAT", " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1437 "maximum_desired_capacity = "SIZE_FORMAT, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1438 minimum_desired_capacity, maximum_desired_capacity)); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1439 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1440 // Should not be greater than the heap max size. No need to adjust |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1441 // it with respect to the heap min size as it's a lower bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1442 // we'll try to make the capacity larger than it, not smaller). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1443 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1444 // Should not be less than the heap min size. No need to adjust it |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1445 // with respect to the heap max size as it's an upper bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1446 // we'll try to make the capacity smaller than it, not greater). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1447 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); |
342 | 1448 |
1449 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1450 const double free_percentage = |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1451 (double) free_after_gc / (double) capacity_after_gc; |
342 | 1452 gclog_or_tty->print_cr("Computing new size after full GC "); |
1453 gclog_or_tty->print_cr(" " | |
1454 " minimum_free_percentage: %6.2f", | |
1455 minimum_free_percentage); | |
1456 gclog_or_tty->print_cr(" " | |
1457 " maximum_free_percentage: %6.2f", | |
1458 maximum_free_percentage); | |
1459 gclog_or_tty->print_cr(" " | |
1460 " capacity: %6.1fK" | |
1461 " minimum_desired_capacity: %6.1fK" | |
1462 " maximum_desired_capacity: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1463 (double) capacity_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1464 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1465 (double) maximum_desired_capacity / (double) K); |
342 | 1466 gclog_or_tty->print_cr(" " |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1467 " free_after_gc: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1468 " used_after_gc: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1469 (double) free_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1470 (double) used_after_gc / (double) K); |
342 | 1471 gclog_or_tty->print_cr(" " |
1472 " free_percentage: %6.2f", | |
1473 free_percentage); | |
1474 } | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1475 if (capacity_after_gc < minimum_desired_capacity) { |
342 | 1476 // Don't expand unless it's significant |
1477 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
1478 expand(expand_bytes); | |
1479 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1480 gclog_or_tty->print_cr(" " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1481 " expanding:" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1482 " max_heap_size: %6.1fK" |
342 | 1483 " minimum_desired_capacity: %6.1fK" |
1484 " expand_bytes: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1485 (double) max_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1486 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1487 (double) expand_bytes / (double) K); |
342 | 1488 } |
1489 | |
1490 // No expansion, now see if we want to shrink | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1491 } else if (capacity_after_gc > maximum_desired_capacity) { |
342 | 1492 // Capacity too large, compute shrinking size |
1493 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1494 shrink(shrink_bytes); | |
1495 if (PrintGC && Verbose) { | |
1496 gclog_or_tty->print_cr(" " | |
1497 " shrinking:" | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1498 " min_heap_size: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1499 " maximum_desired_capacity: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1500 " shrink_bytes: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1501 (double) min_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1502 (double) maximum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1503 (double) shrink_bytes / (double) K); |
342 | 1504 } |
1505 } | |
1506 } | |
1507 | |
1508 | |
1509 HeapWord* | |
1973 | 1510 G1CollectedHeap::satisfy_failed_allocation(size_t word_size, |
1511 bool* succeeded) { | |
1512 assert(SafepointSynchronize::is_at_safepoint(), | |
1513 "satisfy_failed_allocation() should only be called at a safepoint"); | |
1514 assert(Thread::current()->is_VM_thread(), | |
1515 "satisfy_failed_allocation() should only be called by the VM thread"); | |
1516 | |
1517 *succeeded = true; | |
1518 // Let's attempt the allocation first. | |
1519 HeapWord* result = attempt_allocation_at_safepoint(word_size, | |
1520 false /* expect_null_cur_alloc_region */); | |
1521 if (result != NULL) { | |
1522 assert(*succeeded, "sanity"); | |
1523 return result; | |
1524 } | |
342 | 1525 |
1526 // In a G1 heap, we're supposed to keep allocation from failing by | |
1527 // incremental pauses. Therefore, at least for now, we'll favor | |
1528 // expansion over collection. (This might change in the future if we can | |
1529 // do something smarter than full collection to satisfy a failed alloc.) | |
1530 result = expand_and_allocate(word_size); | |
1531 if (result != NULL) { | |
1973 | 1532 assert(*succeeded, "sanity"); |
342 | 1533 return result; |
1534 } | |
1535 | |
1973 | 1536 // Expansion didn't work, we'll try to do a Full GC. |
1537 bool gc_succeeded = do_collection(false, /* explicit_gc */ | |
1538 false, /* clear_all_soft_refs */ | |
1539 word_size); | |
1540 if (!gc_succeeded) { | |
1541 *succeeded = false; | |
1542 return NULL; | |
1543 } | |
1544 | |
1545 // Retry the allocation | |
1546 result = attempt_allocation_at_safepoint(word_size, | |
1547 true /* expect_null_cur_alloc_region */); | |
342 | 1548 if (result != NULL) { |
1973 | 1549 assert(*succeeded, "sanity"); |
342 | 1550 return result; |
1551 } | |
1552 | |
1973 | 1553 // Then, try a Full GC that will collect all soft references. |
1554 gc_succeeded = do_collection(false, /* explicit_gc */ | |
1555 true, /* clear_all_soft_refs */ | |
1556 word_size); | |
1557 if (!gc_succeeded) { | |
1558 *succeeded = false; | |
1559 return NULL; | |
1560 } | |
1561 | |
1562 // Retry the allocation once more | |
1563 result = attempt_allocation_at_safepoint(word_size, | |
1564 true /* expect_null_cur_alloc_region */); | |
342 | 1565 if (result != NULL) { |
1973 | 1566 assert(*succeeded, "sanity"); |
342 | 1567 return result; |
1568 } | |
1569 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1570 assert(!collector_policy()->should_clear_all_soft_refs(), |
1973 | 1571 "Flag should have been handled and cleared prior to this point"); |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1572 |
342 | 1573 // What else? We might try synchronous finalization later. If the total |
1574 // space available is large enough for the allocation, then a more | |
1575 // complete compaction phase than we've tried so far might be | |
1576 // appropriate. | |
1973 | 1577 assert(*succeeded, "sanity"); |
342 | 1578 return NULL; |
1579 } | |
1580 | |
1581 // Attempting to expand the heap sufficiently | |
1582 // to support an allocation of the given "word_size". If | |
1583 // successful, perform the allocation and return the address of the | |
1584 // allocated block, or else "NULL". | |
1585 | |
1586 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
1973 | 1587 assert(SafepointSynchronize::is_at_safepoint(), |
1588 "expand_and_allocate() should only be called at a safepoint"); | |
1589 assert(Thread::current()->is_VM_thread(), | |
1590 "expand_and_allocate() should only be called by the VM thread"); | |
1591 | |
342 | 1592 size_t expand_bytes = word_size * HeapWordSize; |
1593 if (expand_bytes < MinHeapDeltaBytes) { | |
1594 expand_bytes = MinHeapDeltaBytes; | |
1595 } | |
1596 expand(expand_bytes); | |
1597 assert(regions_accounted_for(), "Region leakage!"); | |
1973 | 1598 |
1599 return attempt_allocation_at_safepoint(word_size, | |
1600 true /* expect_null_cur_alloc_region */); | |
342 | 1601 } |
1602 | |
1603 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { | |
1604 size_t pre_used = 0; | |
1605 size_t cleared_h_regions = 0; | |
1606 size_t freed_regions = 0; | |
1607 UncleanRegionList local_list; | |
1608 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, | |
1609 freed_regions, &local_list); | |
1610 | |
1611 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
1612 &local_list); | |
1613 return pre_used; | |
1614 } | |
1615 | |
1616 void | |
1617 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, | |
1618 size_t& pre_used, | |
1619 size_t& cleared_h, | |
1620 size_t& freed_regions, | |
1621 UncleanRegionList* list, | |
1622 bool par) { | |
1623 assert(!hr->continuesHumongous(), "should have filtered these out"); | |
1624 size_t res = 0; | |
677 | 1625 if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && |
1626 !hr->is_young()) { | |
1627 if (G1PolicyVerbose > 0) | |
1628 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" | |
1629 " during cleanup", hr, hr->used()); | |
1630 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); | |
342 | 1631 } |
1632 } | |
1633 | |
1634 // FIXME: both this and shrink could probably be more efficient by | |
1635 // doing one "VirtualSpace::expand_by" call rather than several. | |
1636 void G1CollectedHeap::expand(size_t expand_bytes) { | |
1637 size_t old_mem_size = _g1_storage.committed_size(); | |
1638 // We expand by a minimum of 1K. | |
1639 expand_bytes = MAX2(expand_bytes, (size_t)K); | |
1640 size_t aligned_expand_bytes = | |
1641 ReservedSpace::page_align_size_up(expand_bytes); | |
1642 aligned_expand_bytes = align_size_up(aligned_expand_bytes, | |
1643 HeapRegion::GrainBytes); | |
1644 expand_bytes = aligned_expand_bytes; | |
1645 while (expand_bytes > 0) { | |
1646 HeapWord* base = (HeapWord*)_g1_storage.high(); | |
1647 // Commit more storage. | |
1648 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); | |
1649 if (!successful) { | |
1650 expand_bytes = 0; | |
1651 } else { | |
1652 expand_bytes -= HeapRegion::GrainBytes; | |
1653 // Expand the committed region. | |
1654 HeapWord* high = (HeapWord*) _g1_storage.high(); | |
1655 _g1_committed.set_end(high); | |
1656 // Create a new HeapRegion. | |
1657 MemRegion mr(base, high); | |
1658 bool is_zeroed = !_g1_max_committed.contains(base); | |
1659 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); | |
1660 | |
1661 // Now update max_committed if necessary. | |
1662 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); | |
1663 | |
1664 // Add it to the HeapRegionSeq. | |
1665 _hrs->insert(hr); | |
1666 // Set the zero-fill state, according to whether it's already | |
1667 // zeroed. | |
1668 { | |
1669 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
1670 if (is_zeroed) { | |
1671 hr->set_zero_fill_complete(); | |
1672 put_free_region_on_list_locked(hr); | |
1673 } else { | |
1674 hr->set_zero_fill_needed(); | |
1675 put_region_on_unclean_list_locked(hr); | |
1676 } | |
1677 } | |
1678 _free_regions++; | |
1679 // And we used up an expansion region to create it. | |
1680 _expansion_regions--; | |
1681 // Tell the cardtable about it. | |
1682 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1683 // And the offset table as well. | |
1684 _bot_shared->resize(_g1_committed.word_size()); | |
1685 } | |
1686 } | |
1687 if (Verbose && PrintGC) { | |
1688 size_t new_mem_size = _g1_storage.committed_size(); | |
1689 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", | |
1690 old_mem_size/K, aligned_expand_bytes/K, | |
1691 new_mem_size/K); | |
1692 } | |
1693 } | |
1694 | |
1695 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) | |
1696 { | |
1697 size_t old_mem_size = _g1_storage.committed_size(); | |
1698 size_t aligned_shrink_bytes = | |
1699 ReservedSpace::page_align_size_down(shrink_bytes); | |
1700 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1701 HeapRegion::GrainBytes); | |
1702 size_t num_regions_deleted = 0; | |
1703 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); | |
1704 | |
1705 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1706 if (mr.byte_size() > 0) | |
1707 _g1_storage.shrink_by(mr.byte_size()); | |
1708 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1709 | |
1710 _g1_committed.set_end(mr.start()); | |
1711 _free_regions -= num_regions_deleted; | |
1712 _expansion_regions += num_regions_deleted; | |
1713 | |
1714 // Tell the cardtable about it. | |
1715 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1716 | |
1717 // And the offset table as well. | |
1718 _bot_shared->resize(_g1_committed.word_size()); | |
1719 | |
1720 HeapRegionRemSet::shrink_heap(n_regions()); | |
1721 | |
1722 if (Verbose && PrintGC) { | |
1723 size_t new_mem_size = _g1_storage.committed_size(); | |
1724 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1725 old_mem_size/K, aligned_shrink_bytes/K, | |
1726 new_mem_size/K); | |
1727 } | |
1728 } | |
1729 | |
1730 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
636 | 1731 release_gc_alloc_regions(true /* totally */); |
342 | 1732 tear_down_region_lists(); // We will rebuild them in a moment. |
1733 shrink_helper(shrink_bytes); | |
1734 rebuild_region_lists(); | |
1735 } | |
1736 | |
1737 // Public methods. | |
1738 | |
1739 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1740 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1741 #endif // _MSC_VER | |
1742 | |
1743 | |
1744 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1745 SharedHeap(policy_), | |
1746 _g1_policy(policy_), | |
1111 | 1747 _dirty_card_queue_set(false), |
1705 | 1748 _into_cset_dirty_card_queue_set(false), |
342 | 1749 _ref_processor(NULL), |
1750 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1751 _bot_shared(NULL), | |
1752 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), | |
1753 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1754 _evac_failure_scan_stack(NULL) , | |
1755 _mark_in_progress(false), | |
1756 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), | |
1757 _cur_alloc_region(NULL), | |
1758 _refine_cte_cl(NULL), | |
1759 _free_region_list(NULL), _free_region_list_size(0), | |
1760 _free_regions(0), | |
1761 _full_collection(false), | |
1762 _unclean_region_list(), | |
1763 _unclean_regions_coming(false), | |
1764 _young_list(new YoungList(this)), | |
1765 _gc_time_stamp(0), | |
526 | 1766 _surviving_young_words(NULL), |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1767 _full_collections_completed(0), |
526 | 1768 _in_cset_fast_test(NULL), |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1769 _in_cset_fast_test_base(NULL), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1770 _dirty_cards_region_list(NULL) { |
342 | 1771 _g1h = this; // To catch bugs. |
1772 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1773 vm_exit_during_initialization("Failed necessary allocation."); | |
1774 } | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1775 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1776 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1777 |
342 | 1778 int n_queues = MAX2((int)ParallelGCThreads, 1); |
1779 _task_queues = new RefToScanQueueSet(n_queues); | |
1780 | |
1781 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1782 assert(n_rem_sets > 0, "Invariant."); | |
1783 | |
1784 HeapRegionRemSetIterator** iter_arr = | |
1785 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1786 for (int i = 0; i < n_queues; i++) { | |
1787 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1788 } | |
1789 _rem_set_iterator = iter_arr; | |
1790 | |
1791 for (int i = 0; i < n_queues; i++) { | |
1792 RefToScanQueue* q = new RefToScanQueue(); | |
1793 q->initialize(); | |
1794 _task_queues->register_queue(i, q); | |
1795 } | |
1796 | |
1797 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
636 | 1798 _gc_alloc_regions[ap] = NULL; |
1799 _gc_alloc_region_counts[ap] = 0; | |
1800 _retained_gc_alloc_regions[ap] = NULL; | |
1801 // by default, we do not retain a GC alloc region for each ap; | |
1802 // we'll override this, when appropriate, below | |
1803 _retain_gc_alloc_region[ap] = false; | |
1804 } | |
1805 | |
1806 // We will try to remember the last half-full tenured region we | |
1807 // allocated to at the end of a collection so that we can re-use it | |
1808 // during the next collection. | |
1809 _retain_gc_alloc_region[GCAllocForTenured] = true; | |
1810 | |
342 | 1811 guarantee(_task_queues != NULL, "task_queues allocation failure."); |
1812 } | |
1813 | |
1814 jint G1CollectedHeap::initialize() { | |
1166 | 1815 CollectedHeap::pre_initialize(); |
342 | 1816 os::enable_vtime(); |
1817 | |
1818 // Necessary to satisfy locking discipline assertions. | |
1819 | |
1820 MutexLocker x(Heap_lock); | |
1821 | |
1822 // While there are no constraints in the GC code that HeapWordSize | |
1823 // be any particular value, there are multiple other areas in the | |
1824 // system which believe this to be true (e.g. oop->object_size in some | |
1825 // cases incorrectly returns the size in wordSize units rather than | |
1826 // HeapWordSize). | |
1827 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1828 | |
1829 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1830 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1831 | |
1832 // Ensure that the sizes are properly aligned. | |
1833 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1834 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1835 | |
1836 _cg1r = new ConcurrentG1Refine(); | |
1837 | |
1838 // Reserve the maximum. | |
1839 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1840 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1841 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1842 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1843 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1844 |
342 | 1845 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1846 HeapRegion::GrainBytes, | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1847 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1848 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1849 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1850 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1851 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1852 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1853 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1854 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1855 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1856 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1857 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1858 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1859 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1860 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1861 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1862 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1863 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1864 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1865 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1866 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1867 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1868 } |
342 | 1869 |
1870 if (!heap_rs.is_reserved()) { | |
1871 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1872 return JNI_ENOMEM; | |
1873 } | |
1874 | |
1875 // It is important to do this in a way such that concurrent readers can't | |
1876 // temporarily think somethings in the heap. (I've actually seen this | |
1877 // happen in asserts: DLD.) | |
1878 _reserved.set_word_size(0); | |
1879 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1880 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1881 | |
1882 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1883 | |
1884 _num_humongous_regions = 0; | |
1885 | |
1886 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1887 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1888 set_barrier_set(rem_set()->bs()); | |
1889 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1890 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1891 } else { | |
1892 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1893 return JNI_ENOMEM; | |
1894 } | |
1895 | |
1896 // Also create a G1 rem set. | |
1861 | 1897 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
1898 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
342 | 1899 } else { |
1861 | 1900 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
1901 return JNI_ENOMEM; | |
342 | 1902 } |
1903 | |
1904 // Carve out the G1 part of the heap. | |
1905 | |
1906 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1907 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1908 g1_rs.size()/HeapWordSize); | |
1909 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1910 | |
1911 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1912 | |
1913 _g1_storage.initialize(g1_rs, 0); | |
1914 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1915 _g1_max_committed = _g1_committed; | |
393 | 1916 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1917 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1918 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
1919 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1920 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1921 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1922 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1923 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1924 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1925 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1926 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1927 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1928 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1929 |
342 | 1930 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1931 heap_word_size(init_byte_size)); | |
1932 | |
1933 _g1h = this; | |
1934 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1935 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1936 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1937 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1938 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1939 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1940 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1941 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1942 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1943 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1944 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1945 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1946 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1947 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1948 |
342 | 1949 // Create the ConcurrentMark data structure and thread. |
1950 // (Must do this late, so that "max_regions" is defined.) | |
1951 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1952 _cmThread = _cm->cmThread(); | |
1953 | |
1954 // ...and the concurrent zero-fill thread, if necessary. | |
1955 if (G1ConcZeroFill) { | |
1956 _czft = new ConcurrentZFThread(); | |
1957 } | |
1958 | |
1959 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1960 HeapRegionRemSet::init_heap(max_regions()); | |
1961 | |
677 | 1962 // Now expand into the initial heap size. |
1963 expand(init_byte_size); | |
342 | 1964 |
1965 // Perform any initialization actions delegated to the policy. | |
1966 g1_policy()->init(); | |
1967 | |
1968 g1_policy()->note_start_of_mark_thread(); | |
1969 | |
1970 _refine_cte_cl = | |
1971 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1972 g1_rem_set(), | |
1973 concurrent_g1_refine()); | |
1974 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1975 | |
1976 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1977 SATB_Q_FL_lock, | |
1111 | 1978 G1SATBProcessCompletedThreshold, |
342 | 1979 Shared_SATB_Q_lock); |
794 | 1980 |
1981 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1982 DirtyCardQ_FL_lock, | |
1111 | 1983 concurrent_g1_refine()->yellow_zone(), |
1984 concurrent_g1_refine()->red_zone(), | |
794 | 1985 Shared_DirtyCardQ_lock); |
1986 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1987 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1988 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1989 DirtyCardQ_FL_lock, |
1111 | 1990 -1, // never trigger processing |
1991 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1992 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1993 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1994 } |
1705 | 1995 |
1996 // Initialize the card queue set used to hold cards containing | |
1997 // references into the collection set. | |
1998 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, | |
1999 DirtyCardQ_FL_lock, | |
2000 -1, // never trigger processing | |
2001 -1, // no limit on length | |
2002 Shared_DirtyCardQ_lock, | |
2003 &JavaThread::dirty_card_queue_set()); | |
2004 | |
342 | 2005 // In case we're keeping closure specialization stats, initialize those |
2006 // counts and that mechanism. | |
2007 SpecializationStats::clear(); | |
2008 | |
2009 _gc_alloc_region_list = NULL; | |
2010 | |
2011 // Do later initialization work for concurrent refinement. | |
2012 _cg1r->init(); | |
2013 | |
2014 return JNI_OK; | |
2015 } | |
2016 | |
2017 void G1CollectedHeap::ref_processing_init() { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2018 // Reference processing in G1 currently works as follows: |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2019 // |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2020 // * There is only one reference processor instance that |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2021 // 'spans' the entire heap. It is created by the code |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2022 // below. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2023 // * Reference discovery is not enabled during an incremental |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2024 // pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2025 // * Discoverered refs are not enqueued nor are they processed |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2026 // during an incremental pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2027 // * Reference discovery is enabled at initial marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2028 // * Reference discovery is disabled and the discovered |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2029 // references processed etc during remarking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2030 // * Reference discovery is MT (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2031 // * Reference discovery requires a barrier (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2032 // * Reference processing is currently not MT (see 6608385). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2033 // * A full GC enables (non-MT) reference discovery and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2034 // processes any discovered references. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2035 |
342 | 2036 SharedHeap::ref_processing_init(); |
2037 MemRegion mr = reserved_region(); | |
2038 _ref_processor = ReferenceProcessor::create_ref_processor( | |
2039 mr, // span | |
2040 false, // Reference discovery is not atomic | |
2041 // (though it shouldn't matter here.) | |
2042 true, // mt_discovery | |
2043 NULL, // is alive closure: need to fill this in for efficiency | |
2044 ParallelGCThreads, | |
2045 ParallelRefProcEnabled, | |
2046 true); // Setting next fields of discovered | |
2047 // lists requires a barrier. | |
2048 } | |
2049 | |
2050 size_t G1CollectedHeap::capacity() const { | |
2051 return _g1_committed.byte_size(); | |
2052 } | |
2053 | |
1705 | 2054 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, |
2055 DirtyCardQueue* into_cset_dcq, | |
2056 bool concurrent, | |
342 | 2057 int worker_i) { |
889 | 2058 // Clean cards in the hot card cache |
1705 | 2059 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); |
889 | 2060 |
342 | 2061 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
2062 int n_completed_buffers = 0; | |
1705 | 2063 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { |
342 | 2064 n_completed_buffers++; |
2065 } | |
2066 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
2067 (double) n_completed_buffers); | |
2068 dcqs.clear_n_completed_buffers(); | |
2069 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
2070 } | |
2071 | |
2072 | |
2073 // Computes the sum of the storage used by the various regions. | |
2074 | |
2075 size_t G1CollectedHeap::used() const { | |
862
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2076 assert(Heap_lock->owner() != NULL, |
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2077 "Should be owned on this thread's behalf."); |
342 | 2078 size_t result = _summary_bytes_used; |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2079 // Read only once in case it is set to NULL concurrently |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2080 HeapRegion* hr = _cur_alloc_region; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2081 if (hr != NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2082 result += hr->used(); |
342 | 2083 return result; |
2084 } | |
2085 | |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2086 size_t G1CollectedHeap::used_unlocked() const { |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2087 size_t result = _summary_bytes_used; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2088 return result; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2089 } |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2090 |
342 | 2091 class SumUsedClosure: public HeapRegionClosure { |
2092 size_t _used; | |
2093 public: | |
2094 SumUsedClosure() : _used(0) {} | |
2095 bool doHeapRegion(HeapRegion* r) { | |
2096 if (!r->continuesHumongous()) { | |
2097 _used += r->used(); | |
2098 } | |
2099 return false; | |
2100 } | |
2101 size_t result() { return _used; } | |
2102 }; | |
2103 | |
2104 size_t G1CollectedHeap::recalculate_used() const { | |
2105 SumUsedClosure blk; | |
2106 _hrs->iterate(&blk); | |
2107 return blk.result(); | |
2108 } | |
2109 | |
2110 #ifndef PRODUCT | |
2111 class SumUsedRegionsClosure: public HeapRegionClosure { | |
2112 size_t _num; | |
2113 public: | |
677 | 2114 SumUsedRegionsClosure() : _num(0) {} |
342 | 2115 bool doHeapRegion(HeapRegion* r) { |
2116 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
2117 _num += 1; | |
2118 } | |
2119 return false; | |
2120 } | |
2121 size_t result() { return _num; } | |
2122 }; | |
2123 | |
2124 size_t G1CollectedHeap::recalculate_used_regions() const { | |
2125 SumUsedRegionsClosure blk; | |
2126 _hrs->iterate(&blk); | |
2127 return blk.result(); | |
2128 } | |
2129 #endif // PRODUCT | |
2130 | |
2131 size_t G1CollectedHeap::unsafe_max_alloc() { | |
2132 if (_free_regions > 0) return HeapRegion::GrainBytes; | |
2133 // otherwise, is there space in the current allocation region? | |
2134 | |
2135 // We need to store the current allocation region in a local variable | |
2136 // here. The problem is that this method doesn't take any locks and | |
2137 // there may be other threads which overwrite the current allocation | |
2138 // region field. attempt_allocation(), for example, sets it to NULL | |
2139 // and this can happen *after* the NULL check here but before the call | |
2140 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
2141 // to be a problem in the optimized build, since the two loads of the | |
2142 // current allocation region field are optimized away. | |
2143 HeapRegion* car = _cur_alloc_region; | |
2144 | |
2145 // FIXME: should iterate over all regions? | |
2146 if (car == NULL) { | |
2147 return 0; | |
2148 } | |
2149 return car->free(); | |
2150 } | |
2151 | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2152 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2153 return |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2154 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2155 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2156 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2157 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2158 void G1CollectedHeap::increment_full_collections_completed(bool outer) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2159 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2160 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2161 // We have already incremented _total_full_collections at the start |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2162 // of the GC, so total_full_collections() represents how many full |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2163 // collections have been started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2164 unsigned int full_collections_started = total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2165 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2166 // Given that this method is called at the end of a Full GC or of a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2167 // concurrent cycle, and those can be nested (i.e., a Full GC can |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2168 // interrupt a concurrent cycle), the number of full collections |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2169 // completed should be either one (in the case where there was no |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2170 // nesting) or two (when a Full GC interrupted a concurrent cycle) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2171 // behind the number of full collections started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2172 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2173 // This is the case for the inner caller, i.e. a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2174 assert(outer || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2175 (full_collections_started == _full_collections_completed + 1) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2176 (full_collections_started == _full_collections_completed + 2), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2177 err_msg("for inner caller: full_collections_started = %u " |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2178 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2179 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2180 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2181 // This is the case for the outer caller, i.e. the concurrent cycle. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2182 assert(!outer || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2183 (full_collections_started == _full_collections_completed + 1), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2184 err_msg("for outer caller: full_collections_started = %u " |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2185 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2186 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2187 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2188 _full_collections_completed += 1; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2189 |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2190 // We need to clear the "in_progress" flag in the CM thread before |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2191 // we wake up any waiters (especially when ExplicitInvokesConcurrent |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2192 // is set) so that if a waiter requests another System.gc() it doesn't |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2193 // incorrectly see that a marking cyle is still in progress. |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2194 if (outer) { |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2195 _cmThread->clear_in_progress(); |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2196 } |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2197 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2198 // This notify_all() will ensure that a thread that called |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2199 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2200 // and it's waiting for a full GC to finish will be woken up. It is |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2201 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2202 FullGCCount_lock->notify_all(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2203 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2204 |
342 | 2205 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
2206 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
2207 assert(Heap_lock->is_locked(), "Precondition#2"); | |
2208 GCCauseSetter gcs(this, cause); | |
2209 switch (cause) { | |
2210 case GCCause::_heap_inspection: | |
2211 case GCCause::_heap_dump: { | |
2212 HandleMark hm; | |
2213 do_full_collection(false); // don't clear all soft refs | |
2214 break; | |
2215 } | |
2216 default: // XXX FIX ME | |
2217 ShouldNotReachHere(); // Unexpected use of this function | |
2218 } | |
2219 } | |
2220 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2221 void G1CollectedHeap::collect(GCCause::Cause cause) { |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2222 // The caller doesn't have the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2223 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2224 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2225 unsigned int gc_count_before; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2226 unsigned int full_gc_count_before; |
342 | 2227 { |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2228 MutexLocker ml(Heap_lock); |
1973 | 2229 |
2230 // Don't want to do a GC until cleanup is completed. This | |
2231 // limitation will be removed in the near future when the | |
2232 // operation of the free region list is revamped as part of | |
2233 // CR 6977804. | |
2234 wait_for_cleanup_complete(); | |
2235 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2236 // Read the GC count while holding the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2237 gc_count_before = SharedHeap::heap()->total_collections(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2238 full_gc_count_before = SharedHeap::heap()->total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2239 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2240 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2241 if (should_do_concurrent_full_gc(cause)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2242 // Schedule an initial-mark evacuation pause that will start a |
1973 | 2243 // concurrent cycle. We're setting word_size to 0 which means that |
2244 // we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2245 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2246 0, /* word_size */ |
2247 true, /* should_initiate_conc_mark */ | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2248 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2249 cause); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2250 VMThread::execute(&op); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2251 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2252 if (cause == GCCause::_gc_locker |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2253 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2254 |
1973 | 2255 // Schedule a standard evacuation pause. We're setting word_size |
2256 // to 0 which means that we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2257 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2258 0, /* word_size */ |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2259 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2260 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2261 cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2262 VMThread::execute(&op); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2263 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2264 // Schedule a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2265 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2266 VMThread::execute(&op); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2267 } |
342 | 2268 } |
2269 } | |
2270 | |
2271 bool G1CollectedHeap::is_in(const void* p) const { | |
2272 if (_g1_committed.contains(p)) { | |
2273 HeapRegion* hr = _hrs->addr_to_region(p); | |
2274 return hr->is_in(p); | |
2275 } else { | |
2276 return _perm_gen->as_gen()->is_in(p); | |
2277 } | |
2278 } | |
2279 | |
2280 // Iteration functions. | |
2281 | |
2282 // Iterates an OopClosure over all ref-containing fields of objects | |
2283 // within a HeapRegion. | |
2284 | |
2285 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
2286 MemRegion _mr; | |
2287 OopClosure* _cl; | |
2288 public: | |
2289 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
2290 : _mr(mr), _cl(cl) {} | |
2291 bool doHeapRegion(HeapRegion* r) { | |
2292 if (! r->continuesHumongous()) { | |
2293 r->oop_iterate(_cl); | |
2294 } | |
2295 return false; | |
2296 } | |
2297 }; | |
2298 | |
678 | 2299 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 2300 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
2301 _hrs->iterate(&blk); | |
678 | 2302 if (do_perm) { |
2303 perm_gen()->oop_iterate(cl); | |
2304 } | |
342 | 2305 } |
2306 | |
678 | 2307 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 2308 IterateOopClosureRegionClosure blk(mr, cl); |
2309 _hrs->iterate(&blk); | |
678 | 2310 if (do_perm) { |
2311 perm_gen()->oop_iterate(cl); | |
2312 } | |
342 | 2313 } |
2314 | |
2315 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
2316 | |
2317 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
2318 ObjectClosure* _cl; | |
2319 public: | |
2320 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
2321 bool doHeapRegion(HeapRegion* r) { | |
2322 if (! r->continuesHumongous()) { | |
2323 r->object_iterate(_cl); | |
2324 } | |
2325 return false; | |
2326 } | |
2327 }; | |
2328 | |
678 | 2329 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 2330 IterateObjectClosureRegionClosure blk(cl); |
2331 _hrs->iterate(&blk); | |
678 | 2332 if (do_perm) { |
2333 perm_gen()->object_iterate(cl); | |
2334 } | |
342 | 2335 } |
2336 | |
2337 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
2338 // FIXME: is this right? | |
2339 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
2340 } | |
2341 | |
2342 // Calls a SpaceClosure on a HeapRegion. | |
2343 | |
2344 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
2345 SpaceClosure* _cl; | |
2346 public: | |
2347 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
2348 bool doHeapRegion(HeapRegion* r) { | |
2349 _cl->do_space(r); | |
2350 return false; | |
2351 } | |
2352 }; | |
2353 | |
2354 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
2355 SpaceClosureRegionClosure blk(cl); | |
2356 _hrs->iterate(&blk); | |
2357 } | |
2358 | |
2359 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { | |
2360 _hrs->iterate(cl); | |
2361 } | |
2362 | |
2363 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
2364 HeapRegionClosure* cl) { | |
2365 _hrs->iterate_from(r, cl); | |
2366 } | |
2367 | |
2368 void | |
2369 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { | |
2370 _hrs->iterate_from(idx, cl); | |
2371 } | |
2372 | |
2373 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } | |
2374 | |
2375 void | |
2376 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
2377 int worker, | |
2378 jint claim_value) { | |
355 | 2379 const size_t regions = n_regions(); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2380 const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1); |
355 | 2381 // try to spread out the starting points of the workers |
2382 const size_t start_index = regions / worker_num * (size_t) worker; | |
2383 | |
2384 // each worker will actually look at all regions | |
2385 for (size_t count = 0; count < regions; ++count) { | |
2386 const size_t index = (start_index + count) % regions; | |
2387 assert(0 <= index && index < regions, "sanity"); | |
2388 HeapRegion* r = region_at(index); | |
2389 // we'll ignore "continues humongous" regions (we'll process them | |
2390 // when we come across their corresponding "start humongous" | |
2391 // region) and regions already claimed | |
2392 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
2393 continue; | |
2394 } | |
2395 // OK, try to claim it | |
342 | 2396 if (r->claimHeapRegion(claim_value)) { |
355 | 2397 // success! |
2398 assert(!r->continuesHumongous(), "sanity"); | |
2399 if (r->startsHumongous()) { | |
2400 // If the region is "starts humongous" we'll iterate over its | |
2401 // "continues humongous" first; in fact we'll do them | |
2402 // first. The order is important. In on case, calling the | |
2403 // closure on the "starts humongous" region might de-allocate | |
2404 // and clear all its "continues humongous" regions and, as a | |
2405 // result, we might end up processing them twice. So, we'll do | |
2406 // them first (notice: most closures will ignore them anyway) and | |
2407 // then we'll do the "starts humongous" region. | |
2408 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
2409 HeapRegion* chr = region_at(ch_index); | |
2410 | |
2411 // if the region has already been claimed or it's not | |
2412 // "continues humongous" we're done | |
2413 if (chr->claim_value() == claim_value || | |
2414 !chr->continuesHumongous()) { | |
2415 break; | |
2416 } | |
2417 | |
2418 // Noone should have claimed it directly. We can given | |
2419 // that we claimed its "starts humongous" region. | |
2420 assert(chr->claim_value() != claim_value, "sanity"); | |
2421 assert(chr->humongous_start_region() == r, "sanity"); | |
2422 | |
2423 if (chr->claimHeapRegion(claim_value)) { | |
2424 // we should always be able to claim it; noone else should | |
2425 // be trying to claim this region | |
2426 | |
2427 bool res2 = cl->doHeapRegion(chr); | |
2428 assert(!res2, "Should not abort"); | |
2429 | |
2430 // Right now, this holds (i.e., no closure that actually | |
2431 // does something with "continues humongous" regions | |
2432 // clears them). We might have to weaken it in the future, | |
2433 // but let's leave these two asserts here for extra safety. | |
2434 assert(chr->continuesHumongous(), "should still be the case"); | |
2435 assert(chr->humongous_start_region() == r, "sanity"); | |
2436 } else { | |
2437 guarantee(false, "we should not reach here"); | |
2438 } | |
2439 } | |
2440 } | |
2441 | |
2442 assert(!r->continuesHumongous(), "sanity"); | |
2443 bool res = cl->doHeapRegion(r); | |
2444 assert(!res, "Should not abort"); | |
2445 } | |
2446 } | |
2447 } | |
2448 | |
390 | 2449 class ResetClaimValuesClosure: public HeapRegionClosure { |
2450 public: | |
2451 bool doHeapRegion(HeapRegion* r) { | |
2452 r->set_claim_value(HeapRegion::InitialClaimValue); | |
2453 return false; | |
2454 } | |
2455 }; | |
2456 | |
2457 void | |
2458 G1CollectedHeap::reset_heap_region_claim_values() { | |
2459 ResetClaimValuesClosure blk; | |
2460 heap_region_iterate(&blk); | |
2461 } | |
2462 | |
#ifdef ASSERT
// This checks whether all regions in the heap have the correct claim
// value, and also (piggy-backed on the same walk) that the
// humongous_start_region() information on "continues humongous"
// regions is correct.

class CheckClaimValuesClosure : public HeapRegionClosure {
private:
  jint        _claim_value;   // the value every region should have
  size_t      _failures;      // number of mismatches found
  HeapRegion* _sh_region;     // last "starts humongous" region seen
public:
  CheckClaimValuesClosure(jint claim_value) :
    _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->claim_value() != _claim_value) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                             "claim value = %d, should be %d",
                             r->bottom(), r->end(), r->claim_value(),
                             _claim_value);
      ++_failures;
    }
    // Track the current "starts humongous" region so each "continues
    // humongous" region can be checked against it.
    if (!r->isHumongous()) {
      _sh_region = NULL;
    } else if (r->startsHumongous()) {
      _sh_region = r;
    } else if (r->continuesHumongous()) {
      if (r->humongous_start_region() != _sh_region) {
        gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                               "HS = "PTR_FORMAT", should be "PTR_FORMAT,
                               r->bottom(), r->end(),
                               r->humongous_start_region(),
                               _sh_region);
        ++_failures;
      }
    }
    return false;
  }
  size_t failures() {
    return _failures;
  }
};

// Returns true iff every region carries "claim_value" (debug builds only).
bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  CheckClaimValuesClosure cl(claim_value);
  heap_region_iterate(&cl);
  return cl.failures() == 0;
}
#endif // ASSERT
342 | 2512 |
2513 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2514 HeapRegion* r = g1_policy()->collection_set(); | |
2515 while (r != NULL) { | |
2516 HeapRegion* next = r->next_in_collection_set(); | |
2517 if (cl->doHeapRegion(r)) { | |
2518 cl->incomplete(); | |
2519 return; | |
2520 } | |
2521 r = next; | |
2522 } | |
2523 } | |
2524 | |
2525 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
2526 HeapRegionClosure *cl) { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2527 if (r == NULL) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2528 // The CSet is empty so there's nothing to do. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2529 return; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2530 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2531 |
342 | 2532 assert(r->in_collection_set(), |
2533 "Start region must be a member of the collection set."); | |
2534 HeapRegion* cur = r; | |
2535 while (cur != NULL) { | |
2536 HeapRegion* next = cur->next_in_collection_set(); | |
2537 if (cl->doHeapRegion(cur) && false) { | |
2538 cl->incomplete(); | |
2539 return; | |
2540 } | |
2541 cur = next; | |
2542 } | |
2543 cur = g1_policy()->collection_set(); | |
2544 while (cur != r) { | |
2545 HeapRegion* next = cur->next_in_collection_set(); | |
2546 if (cl->doHeapRegion(cur) && false) { | |
2547 cl->incomplete(); | |
2548 return; | |
2549 } | |
2550 cur = next; | |
2551 } | |
2552 } | |
2553 | |
2554 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
2555 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
2556 } | |
2557 | |
2558 | |
2559 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2560 Space* res = heap_region_containing(addr); | |
2561 if (res == NULL) | |
2562 res = perm_gen()->space_containing(addr); | |
2563 return res; | |
2564 } | |
2565 | |
2566 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2567 Space* sp = space_containing(addr); | |
2568 if (sp != NULL) { | |
2569 return sp->block_start(addr); | |
2570 } | |
2571 return NULL; | |
2572 } | |
2573 | |
2574 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
2575 Space* sp = space_containing(addr); | |
2576 assert(sp != NULL, "block_size of address outside of heap"); | |
2577 return sp->block_size(addr); | |
2578 } | |
2579 | |
2580 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2581 Space* sp = space_containing(addr); | |
2582 return sp->block_is_obj(addr); | |
2583 } | |
2584 | |
2585 bool G1CollectedHeap::supports_tlab_allocation() const { | |
2586 return true; | |
2587 } | |
2588 | |
2589 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
2590 return HeapRegion::GrainBytes; | |
2591 } | |
2592 | |
2593 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
2594 // Return the remaining space in the cur alloc region, but not less than | |
2595 // the min TLAB size. | |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2596 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2597 // Also, this value can be at most the humongous object threshold, |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2598 // since we can't allow tlabs to grow big enough to accomodate |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2599 // humongous objects. |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2600 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2601 // We need to store the cur alloc region locally, since it might change |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2602 // between when we test for NULL and when we use it later. |
342 | 2603 ContiguousSpace* cur_alloc_space = _cur_alloc_region; |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2604 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2605 |
342 | 2606 if (cur_alloc_space == NULL) { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2607 return max_tlab_size; |
342 | 2608 } else { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2609 return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize), |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2610 max_tlab_size); |
342 | 2611 } |
2612 } | |
2613 | |
2614 bool G1CollectedHeap::allocs_are_zero_filled() { | |
2615 return false; | |
2616 } | |
2617 | |
2618 size_t G1CollectedHeap::large_typearray_limit() { | |
2619 // FIXME | |
2620 return HeapRegion::GrainBytes/HeapWordSize; | |
2621 } | |
2622 | |
2623 size_t G1CollectedHeap::max_capacity() const { | |
1092
ed52bcc32739
6880903: G1: G1 reports incorrect Runtime.maxMemory()
tonyp
parents:
1089
diff
changeset
|
2624 return g1_reserved_obj_bytes(); |
342 | 2625 } |
2626 | |
2627 jlong G1CollectedHeap::millis_since_last_gc() { | |
2628 // assert(false, "NYI"); | |
2629 return 0; | |
2630 } | |
2631 | |
2632 | |
2633 void G1CollectedHeap::prepare_for_verify() { | |
2634 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2635 ensure_parsability(false); | |
2636 } | |
2637 g1_rem_set()->prepare_for_verify(); | |
2638 } | |
2639 | |
2640 class VerifyLivenessOopClosure: public OopClosure { | |
2641 G1CollectedHeap* g1h; | |
2642 public: | |
2643 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
2644 g1h = _g1h; | |
2645 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2646 void do_oop(narrowOop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2647 void do_oop( oop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2648 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2649 template <class T> void do_oop_work(T *p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2650 oop obj = oopDesc::load_decode_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2651 guarantee(obj == NULL || !g1h->is_obj_dead(obj), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2652 "Dead object referenced by a not dead object"); |
342 | 2653 } |
2654 }; | |
2655 | |
2656 class VerifyObjsInRegionClosure: public ObjectClosure { | |
811 | 2657 private: |
342 | 2658 G1CollectedHeap* _g1h; |
2659 size_t _live_bytes; | |
2660 HeapRegion *_hr; | |
811 | 2661 bool _use_prev_marking; |
342 | 2662 public: |
811 | 2663 // use_prev_marking == true -> use "prev" marking information, |
2664 // use_prev_marking == false -> use "next" marking information | |
2665 VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) | |
2666 : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { | |
342 | 2667 _g1h = G1CollectedHeap::heap(); |
2668 } | |
2669 void do_object(oop o) { | |
2670 VerifyLivenessOopClosure isLive(_g1h); | |
2671 assert(o != NULL, "Huh?"); | |
811 | 2672 if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { |
342 | 2673 o->oop_iterate(&isLive); |
1389
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2674 if (!_hr->obj_allocated_since_prev_marking(o)) { |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2675 size_t obj_size = o->size(); // Make sure we don't overflow |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2676 _live_bytes += (obj_size * HeapWordSize); |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2677 } |
342 | 2678 } |
2679 } | |
2680 size_t live_bytes() { return _live_bytes; } | |
2681 }; | |
2682 | |
2683 class PrintObjsInRegionClosure : public ObjectClosure { | |
2684 HeapRegion *_hr; | |
2685 G1CollectedHeap *_g1; | |
2686 public: | |
2687 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2688 _g1 = G1CollectedHeap::heap(); | |
2689 }; | |
2690 | |
2691 void do_object(oop o) { | |
2692 if (o != NULL) { | |
2693 HeapWord *start = (HeapWord *) o; | |
2694 size_t word_sz = o->size(); | |
2695 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2696 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2697 (void*) o, word_sz, | |
2698 _g1->isMarkedPrev(o), | |
2699 _g1->isMarkedNext(o), | |
2700 _hr->obj_allocated_since_prev_marking(o)); | |
2701 HeapWord *end = start + word_sz; | |
2702 HeapWord *cur; | |
2703 int *val; | |
2704 for (cur = start; cur < end; cur++) { | |
2705 val = (int *) cur; | |
2706 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2707 } | |
2708 } | |
2709 } | |
2710 }; | |
2711 | |
2712 class VerifyRegionClosure: public HeapRegionClosure { | |
811 | 2713 private: |
342 | 2714 bool _allow_dirty; |
390 | 2715 bool _par; |
811 | 2716 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2717 bool _failures; |
811 | 2718 public: |
2719 // use_prev_marking == true -> use "prev" marking information, | |
2720 // use_prev_marking == false -> use "next" marking information | |
2721 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2722 : _allow_dirty(allow_dirty), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2723 _par(par), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2724 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2725 _failures(false) {} |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2726 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2727 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2728 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2729 } |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2730 |
342 | 2731 bool doHeapRegion(HeapRegion* r) { |
390 | 2732 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2733 "Should be unclaimed at verify points."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2734 if (!r->continuesHumongous()) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2735 bool failures = false; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2736 r->verify(_allow_dirty, _use_prev_marking, &failures); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2737 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2738 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2739 } else { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2740 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2741 r->object_iterate(¬_dead_yet_cl); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2742 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2743 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2744 "max_live_bytes "SIZE_FORMAT" " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2745 "< calculated "SIZE_FORMAT, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2746 r->bottom(), r->end(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2747 r->max_live_bytes(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2748 not_dead_yet_cl.live_bytes()); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2749 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2750 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2751 } |
342 | 2752 } |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2753 return false; // stop the region iteration if we hit a failure |
342 | 2754 } |
2755 }; | |
2756 | |
2757 class VerifyRootsClosure: public OopsInGenClosure { | |
2758 private: | |
2759 G1CollectedHeap* _g1h; | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2760 bool _use_prev_marking; |
342 | 2761 bool _failures; |
2762 public: | |
811 | 2763 // use_prev_marking == true -> use "prev" marking information, |
2764 // use_prev_marking == false -> use "next" marking information | |
2765 VerifyRootsClosure(bool use_prev_marking) : | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2766 _g1h(G1CollectedHeap::heap()), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2767 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2768 _failures(false) { } |
342 | 2769 |
2770 bool failures() { return _failures; } | |
2771 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2772 template <class T> void do_oop_nv(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2773 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2774 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2775 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
811 | 2776 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { |
342 | 2777 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2778 "points to dead obj "PTR_FORMAT, p, (void*) obj); |
342 | 2779 obj->print_on(gclog_or_tty); |
2780 _failures = true; | |
2781 } | |
2782 } | |
2783 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2784 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2785 void do_oop(oop* p) { do_oop_nv(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2786 void do_oop(narrowOop* p) { do_oop_nv(p); } |
342 | 2787 }; |
2788 | |
390 | 2789 // This is the task used for parallel heap verification. |
2790 | |
2791 class G1ParVerifyTask: public AbstractGangTask { | |
2792 private: | |
2793 G1CollectedHeap* _g1h; | |
2794 bool _allow_dirty; | |
811 | 2795 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2796 bool _failures; |
390 | 2797 |
2798 public: | |
811 | 2799 // use_prev_marking == true -> use "prev" marking information, |
2800 // use_prev_marking == false -> use "next" marking information | |
2801 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, | |
2802 bool use_prev_marking) : | |
390 | 2803 AbstractGangTask("Parallel verify task"), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2804 _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2805 _allow_dirty(allow_dirty), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2806 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2807 _failures(false) { } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2808 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2809 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2810 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2811 } |
390 | 2812 |
2813 void work(int worker_i) { | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2814 HandleMark hm; |
811 | 2815 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); |
390 | 2816 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
2817 HeapRegion::ParVerifyClaimValue); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2818 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2819 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2820 } |
390 | 2821 } |
2822 }; | |
2823 | |
342 | 2824 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
811 | 2825 verify(allow_dirty, silent, /* use_prev_marking */ true); |
2826 } | |
2827 | |
2828 void G1CollectedHeap::verify(bool allow_dirty, | |
2829 bool silent, | |
2830 bool use_prev_marking) { | |
342 | 2831 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
2832 if (!silent) { gclog_or_tty->print("roots "); } | |
811 | 2833 VerifyRootsClosure rootsCl(use_prev_marking); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2834 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2835 process_strong_roots(true, // activate StrongRootsScope |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2836 false, |
342 | 2837 SharedHeap::SO_AllClasses, |
2838 &rootsCl, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2839 &blobsCl, |
342 | 2840 &rootsCl); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2841 bool failures = rootsCl.failures(); |
342 | 2842 rem_set()->invalidate(perm_gen()->used_region(), false); |
2843 if (!silent) { gclog_or_tty->print("heapRegions "); } | |
390 | 2844 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2845 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2846 "sanity check"); | |
2847 | |
811 | 2848 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
390 | 2849 int n_workers = workers()->total_workers(); |
2850 set_par_threads(n_workers); | |
2851 workers()->run_task(&task); | |
2852 set_par_threads(0); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2853 if (task.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2854 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2855 } |
390 | 2856 |
2857 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2858 "sanity check"); | |
2859 | |
2860 reset_heap_region_claim_values(); | |
2861 | |
2862 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2863 "sanity check"); | |
2864 } else { | |
811 | 2865 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
390 | 2866 _hrs->iterate(&blk); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2867 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2868 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2869 } |
390 | 2870 } |
342 | 2871 if (!silent) gclog_or_tty->print("remset "); |
2872 rem_set()->verify(); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2873 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2874 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2875 gclog_or_tty->print_cr("Heap:"); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2876 print_on(gclog_or_tty, true /* extended */); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2877 gclog_or_tty->print_cr(""); |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2878 #ifndef PRODUCT |
1044 | 2879 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
1388 | 2880 concurrent_mark()->print_reachable("at-verification-failure", |
2881 use_prev_marking, false /* all */); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2882 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2883 #endif |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2884 gclog_or_tty->flush(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2885 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2886 guarantee(!failures, "there should not have been any failures"); |
342 | 2887 } else { |
2888 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2889 } | |
2890 } | |
2891 | |
2892 class PrintRegionClosure: public HeapRegionClosure { | |
2893 outputStream* _st; | |
2894 public: | |
2895 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2896 bool doHeapRegion(HeapRegion* r) { | |
2897 r->print_on(_st); | |
2898 return false; | |
2899 } | |
2900 }; | |
2901 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2902 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 2903 |
2904 void G1CollectedHeap::print_on(outputStream* st) const { | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2905 print_on(st, PrintHeapAtGCExtended); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2906 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2907 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2908 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2909 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2910 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2911 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2912 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2913 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2914 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2915 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2916 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2917 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2918 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2919 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2920 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2921 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2922 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2923 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2924 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2925 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2926 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2927 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2928 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2929 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2930 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2931 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2932 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2933 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 2934 PrintRegionClosure blk(st); |
2935 _hrs->iterate(&blk); | |
2936 } | |
2937 | |
2938 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2939 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1019 | 2940 workers()->print_worker_threads_on(st); |
2941 } | |
2942 | |
2943 _cmThread->print_on(st); | |
342 | 2944 st->cr(); |
1019 | 2945 |
2946 _cm->print_worker_threads_on(st); | |
2947 | |
2948 _cg1r->print_worker_threads_on(st); | |
2949 | |
342 | 2950 _czft->print_on(st); |
2951 st->cr(); | |
2952 } | |
2953 | |
2954 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2955 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 2956 workers()->threads_do(tc); |
2957 } | |
2958 tc->do_thread(_cmThread); | |
794 | 2959 _cg1r->threads_do(tc); |
342 | 2960 tc->do_thread(_czft); |
2961 } | |
2962 | |
2963 void G1CollectedHeap::print_tracing_info() const { | |
2964 // We'll overload this to mean "trace GC pause statistics." | |
2965 if (TraceGen0Time || TraceGen1Time) { | |
2966 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2967 // to that. | |
2968 g1_policy()->print_tracing_info(); | |
2969 } | |
751 | 2970 if (G1SummarizeRSetStats) { |
342 | 2971 g1_rem_set()->print_summary_info(); |
2972 } | |
1282 | 2973 if (G1SummarizeConcMark) { |
342 | 2974 concurrent_mark()->print_summary_info(); |
2975 } | |
751 | 2976 if (G1SummarizeZFStats) { |
342 | 2977 ConcurrentZFThread::print_summary_info(); |
2978 } | |
2979 g1_policy()->print_yg_surv_rate_info(); | |
2980 | |
2981 SpecializationStats::print(); | |
2982 } | |
2983 | |
2984 | |
2985 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2986 HeapRegion* hr = heap_region_containing(addr); | |
2987 if (hr == NULL) { | |
2988 return 0; | |
2989 } else { | |
2990 return 1; | |
2991 } | |
2992 } | |
2993 | |
2994 G1CollectedHeap* G1CollectedHeap::heap() { | |
2995 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
2996 "not a garbage-first heap"); | |
2997 return _g1h; | |
2998 } | |
2999 | |
3000 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
3001 // always_do_update_barrier = false; |
342 | 3002 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
3003 // Call allocation profiler | |
3004 AllocationProfiler::iterate_since_last_gc(); | |
3005 // Fill TLAB's and such | |
3006 ensure_parsability(true); | |
3007 } | |
3008 | |
3009 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
3010 // FIXME: what is this about? | |
3011 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
3012 // is set. | |
3013 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
3014 "derived pointer present")); | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
3015 // always_do_update_barrier = true; |
342 | 3016 } |
3017 | |
1973 | 3018 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, |
3019 unsigned int gc_count_before, | |
3020 bool* succeeded) { | |
3021 assert_heap_not_locked_and_not_at_safepoint(); | |
342 | 3022 g1_policy()->record_stop_world_start(); |
1973 | 3023 VM_G1IncCollectionPause op(gc_count_before, |
3024 word_size, | |
3025 false, /* should_initiate_conc_mark */ | |
3026 g1_policy()->max_pause_time_ms(), | |
3027 GCCause::_g1_inc_collection_pause); | |
3028 VMThread::execute(&op); | |
3029 | |
3030 HeapWord* result = op.result(); | |
3031 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded(); | |
3032 assert(result == NULL || ret_succeeded, | |
3033 "the result should be NULL if the VM did not succeed"); | |
3034 *succeeded = ret_succeeded; | |
3035 | |
3036 assert_heap_not_locked(); | |
3037 return result; | |
342 | 3038 } |
3039 | |
3040 void | |
3041 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3042 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3043 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3044 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3045 CGC_lock->notify(); |
342 | 3046 } |
3047 } | |
3048 | |
3049 class VerifyMarkedObjsClosure: public ObjectClosure { | |
3050 G1CollectedHeap* _g1h; | |
3051 public: | |
3052 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
3053 void do_object(oop obj) { | |
3054 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
3055 "markandsweep mark should agree with concurrent deadness"); | |
3056 } | |
3057 }; | |
3058 | |
3059 void | |
3060 G1CollectedHeap::checkConcurrentMark() { | |
3061 VerifyMarkedObjsClosure verifycl(this); | |
3062 // MutexLockerEx x(getMarkBitMapLock(), | |
3063 // Mutex::_no_safepoint_check_flag); | |
678 | 3064 object_iterate(&verifycl, false); |
342 | 3065 } |
3066 | |
3067 void G1CollectedHeap::do_sync_mark() { | |
3068 _cm->checkpointRootsInitial(); | |
3069 _cm->markFromRoots(); | |
3070 _cm->checkpointRootsFinal(false); | |
3071 } | |
3072 | |
3073 // <NEW PREDICTION> | |
3074 | |
3075 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
3076 bool young) { | |
3077 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
3078 } | |
3079 | |
3080 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
3081 predicted_time_ms) { | |
3082 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
3083 } | |
3084 | |
3085 size_t G1CollectedHeap::pending_card_num() { | |
3086 size_t extra_cards = 0; | |
3087 JavaThread *curr = Threads::first(); | |
3088 while (curr != NULL) { | |
3089 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
3090 extra_cards += dcq.size(); | |
3091 curr = curr->next(); | |
3092 } | |
3093 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3094 size_t buffer_size = dcqs.buffer_size(); | |
3095 size_t buffer_num = dcqs.completed_buffers_num(); | |
3096 return buffer_size * buffer_num + extra_cards; | |
3097 } | |
3098 | |
3099 size_t G1CollectedHeap::max_pending_card_num() { | |
3100 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3101 size_t buffer_size = dcqs.buffer_size(); | |
3102 size_t buffer_num = dcqs.completed_buffers_num(); | |
3103 int thread_num = Threads::number_of_threads(); | |
3104 return (buffer_num + thread_num) * buffer_size; | |
3105 } | |
3106 | |
3107 size_t G1CollectedHeap::cards_scanned() { | |
1861 | 3108 return g1_rem_set()->cardsScanned(); |
342 | 3109 } |
3110 | |
3111 void | |
3112 G1CollectedHeap::setup_surviving_young_words() { | |
3113 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
3114 size_t array_length = g1_policy()->young_cset_length(); | |
3115 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3116 if (_surviving_young_words == NULL) { | |
3117 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
3118 "Not enough space for young surv words summary."); | |
3119 } | |
3120 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3121 #ifdef ASSERT |
342 | 3122 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3123 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3124 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3125 #endif // !ASSERT |
342 | 3126 } |
3127 | |
3128 void | |
3129 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
3130 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3131 size_t array_length = g1_policy()->young_cset_length(); | |
3132 for (size_t i = 0; i < array_length; ++i) | |
3133 _surviving_young_words[i] += surv_young_words[i]; | |
3134 } | |
3135 | |
3136 void | |
3137 G1CollectedHeap::cleanup_surviving_young_words() { | |
3138 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
3139 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
3140 _surviving_young_words = NULL; | |
3141 } | |
3142 | |
3143 // </NEW PREDICTION> | |
3144 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3145 struct PrepareForRSScanningClosure : public HeapRegionClosure { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3146 bool doHeapRegion(HeapRegion *r) { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3147 r->rem_set()->set_iter_claimed(0); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3148 return false; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3149 } |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3150 }; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3151 |
1709 | 3152 #if TASKQUEUE_STATS |
3153 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { | |
3154 st->print_raw_cr("GC Task Stats"); | |
3155 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); | |
3156 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); | |
3157 } | |
3158 | |
3159 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { | |
3160 print_taskqueue_stats_hdr(st); | |
3161 | |
3162 TaskQueueStats totals; | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3163 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3164 for (int i = 0; i < n; ++i) { |
3165 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); | |
3166 totals += task_queue(i)->stats; | |
3167 } | |
3168 st->print_raw("tot "); totals.print(st); st->cr(); | |
3169 | |
3170 DEBUG_ONLY(totals.verify()); | |
3171 } | |
3172 | |
3173 void G1CollectedHeap::reset_taskqueue_stats() { | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3174 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3175 for (int i = 0; i < n; ++i) { |
3176 task_queue(i)->stats.reset(); | |
3177 } | |
3178 } | |
3179 #endif // TASKQUEUE_STATS | |
3180 | |
1973 | 3181 bool |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3182 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3183 if (GC_locker::check_active_before_gc()) { |
1973 | 3184 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3185 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3186 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3187 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3188 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3189 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3190 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3191 { |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3192 ResourceMark rm; |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3193 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3194 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3195 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3196 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3197 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3198 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3199 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3200 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3201 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3202 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3203 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3204 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3205 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3206 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3207 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3208 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3209 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3210 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3211 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3212 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3213 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3214 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3215 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3216 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3217 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3218 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3219 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3220 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3221 TraceMemoryManagerStats tms(false /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3222 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3223 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3224 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3225 guarantee(!is_gc_active(), "collection is not reentrant"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3226 assert(regions_accounted_for(), "Region leakage!"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3227 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3228 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3229 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3230 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3231 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3232 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3233 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3234 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3235 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3236 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3237 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3238 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3239 increment_total_collections(false /* full gc */); |
342 | 3240 |
3241 #if G1_REM_SET_LOGGING | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3242 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3243 print(); |
342 | 3244 #endif |
3245 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3246 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3247 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3248 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3249 gclog_or_tty->print(" VerifyBeforeGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3250 Universe::verify(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3251 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3252 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3253 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3254 |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3255 // Please see comment in G1CollectedHeap::ref_processing_init() |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3256 // to see how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3257 // |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3258 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3259 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3260 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3261 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3262 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3263 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3264 // of the collection set!). |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3265 abandon_cur_alloc_region(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3266 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3267 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3268 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3269 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3270 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3271 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3272 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3273 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3274 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3275 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3276 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3277 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3278 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3279 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3280 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3281 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3282 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3283 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3284 #endif // YOUNG_LIST_VERBOSE |
342 | 3285 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3286 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3287 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3288 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3289 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3290 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3291 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3292 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3293 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3294 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3295 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3296 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3297 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3298 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3299 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3300 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3301 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3302 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3303 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3304 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3305 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3306 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3307 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3308 if (mark_in_progress()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3309 concurrent_mark()->newCSet(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3310 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3311 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3312 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3313 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3314 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3315 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3316 |
1707 | 3317 g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3318 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3319 // Nothing to do if we were unable to choose a collection set. |
342 | 3320 #if G1_REM_SET_LOGGING |
1707 | 3321 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
3322 print(); | |
342 | 3323 #endif |
1707 | 3324 PrepareForRSScanningClosure prepare_for_rs_scan; |
3325 collection_set_iterate(&prepare_for_rs_scan); | |
3326 | |
3327 setup_surviving_young_words(); | |
3328 | |
3329 // Set up the gc allocation regions. | |
3330 get_gc_alloc_regions(); | |
3331 | |
3332 // Actually do the work... | |
3333 evacuate_collection_set(); | |
3334 | |
3335 free_collection_set(g1_policy()->collection_set()); | |
3336 g1_policy()->clear_collection_set(); | |
3337 | |
3338 cleanup_surviving_young_words(); | |
3339 | |
3340 // Start a new incremental collection set for the next pause. | |
3341 g1_policy()->start_incremental_cset_building(); | |
3342 | |
3343 // Clear the _cset_fast_test bitmap in anticipation of adding | |
3344 // regions to the incremental collection set for the next | |
3345 // evacuation pause. | |
3346 clear_cset_fast_test(); | |
3347 | |
3348 if (g1_policy()->in_young_gc_mode()) { | |
3349 _young_list->reset_sampled_info(); | |
3350 | |
3351 // Don't check the whole heap at this point as the | |
3352 // GC alloc regions from this pause have been tagged | |
3353 // as survivors and moved on to the survivor list. | |
3354 // Survivor regions will fail the !is_young() check. | |
3355 assert(check_young_list_empty(false /* check_heap */), | |
3356 "young list should be empty"); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3357 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3358 #if YOUNG_LIST_VERBOSE |
1707 | 3359 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
3360 _young_list->print(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3361 #endif // YOUNG_LIST_VERBOSE |
342 | 3362 |
1707 | 3363 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3364 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3365 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3366 |
1707 | 3367 _young_list->reset_auxilary_lists(); |
342 | 3368 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3369 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3370 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3371 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3372 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3373 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3374 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3375 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3376 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3377 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3378 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3379 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3380 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3381 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3382 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3383 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3384 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3385 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3386 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3387 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3388 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3389 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3390 } |
342 | 3391 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3392 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3393 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3394 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3395 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3396 #endif // YOUNG_LIST_VERBOSE |
342 | 3397 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3398 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3399 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3400 g1_policy()->record_pause_time_ms(pause_time_ms); |
1707 | 3401 g1_policy()->record_collection_pause_end(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3402 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3403 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3404 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3405 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3406 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3407 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3408 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3409 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3410 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3411 Universe::verify(false); |
342 | 3412 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3413 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3414 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3415 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3416 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3417 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3418 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3419 size_t bytes_before = capacity(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3420 expand(expand_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3421 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3422 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3423 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3424 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3425 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3426 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3427 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3428 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3429 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3430 #endif |
342 | 3431 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3432 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3433 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3434 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3435 assert(verify_region_lists(), "Bad region lists."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3436 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3437 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3438 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3439 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3440 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3441 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3442 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3443 |
1709 | 3444 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
3445 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | |
3446 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3447 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3448 Universe::print_heap_after_gc(); |
342 | 3449 } |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3450 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3451 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3452 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3453 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3454 } |
1973 | 3455 |
3456 return true; | |
342 | 3457 } |
3458 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3459 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3460 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3461 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3462 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3463 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3464 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3465 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3466 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3467 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3468 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3469 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3470 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3471 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3472 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3473 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3474 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3475 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3476 |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3477 |
342 | 3478 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
3479 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 3480 // make sure we don't call set_gc_alloc_region() multiple times on |
3481 // the same region | |
3482 assert(r == NULL || !r->is_gc_alloc_region(), | |
3483 "shouldn't already be a GC alloc region"); | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3484 assert(r == NULL || !r->isHumongous(), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3485 "humongous regions shouldn't be used as GC alloc regions"); |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3486 |
342 | 3487 HeapWord* original_top = NULL; |
3488 if (r != NULL) | |
3489 original_top = r->top(); | |
3490 | |
3491 // We will want to record the used space in r as being there before gc. | |
3492 // One we install it as a GC alloc region it's eligible for allocation. | |
3493 // So record it now and use it later. | |
3494 size_t r_used = 0; | |
3495 if (r != NULL) { | |
3496 r_used = r->used(); | |
3497 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
3498 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 3499 // need to take the lock to guard against two threads calling |
3500 // get_gc_alloc_region concurrently (very unlikely but...) | |
3501 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3502 r->save_marks(); | |
3503 } | |
3504 } | |
3505 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
3506 _gc_alloc_regions[purpose] = r; | |
3507 if (old_alloc_region != NULL) { | |
3508 // Replace aliases too. | |
3509 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3510 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
3511 _gc_alloc_regions[ap] = r; | |
3512 } | |
3513 } | |
3514 } | |
3515 if (r != NULL) { | |
3516 push_gc_alloc_region(r); | |
3517 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
3518 // We are using a region as a GC alloc region after it has been used | |
3519 // as a mutator allocation region during the current marking cycle. | |
3520 // The mutator-allocated objects are currently implicitly marked, but | |
3521 // when we move hr->next_top_at_mark_start() forward at the the end | |
3522 // of the GC pause, they won't be. We therefore mark all objects in | |
3523 // the "gap". We do this object-by-object, since marking densely | |
3524 // does not currently work right with marking bitmap iteration. This | |
3525 // means we rely on TLAB filling at the start of pauses, and no | |
3526 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
3527 // to fix the marking bitmap iteration. | |
3528 HeapWord* curhw = r->next_top_at_mark_start(); | |
3529 HeapWord* t = original_top; | |
3530 | |
3531 while (curhw < t) { | |
3532 oop cur = (oop)curhw; | |
3533 // We'll assume parallel for generality. This is rare code. | |
3534 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
3535 curhw = curhw + cur->size(); | |
3536 } | |
3537 assert(curhw == t, "Should have parsed correctly."); | |
3538 } | |
3539 if (G1PolicyVerbose > 1) { | |
3540 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
3541 "for survivors:", r->bottom(), original_top, r->end()); | |
3542 r->print(); | |
3543 } | |
3544 g1_policy()->record_before_bytes(r_used); | |
3545 } | |
3546 } | |
3547 | |
3548 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
3549 assert(Thread::current()->is_VM_thread() || | |
3550 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); | |
3551 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), | |
3552 "Precondition."); | |
3553 hr->set_is_gc_alloc_region(true); | |
3554 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
3555 _gc_alloc_region_list = hr; | |
3556 } | |
3557 | |
3558 #ifdef G1_DEBUG | |
3559 class FindGCAllocRegion: public HeapRegionClosure { | |
3560 public: | |
3561 bool doHeapRegion(HeapRegion* r) { | |
3562 if (r->is_gc_alloc_region()) { | |
3563 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", | |
3564 r->hrs_index(), r->bottom()); | |
3565 } | |
3566 return false; | |
3567 } | |
3568 }; | |
3569 #endif // G1_DEBUG | |
3570 | |
3571 void G1CollectedHeap::forget_alloc_region_list() { | |
3572 assert(Thread::current()->is_VM_thread(), "Precondition"); | |
3573 while (_gc_alloc_region_list != NULL) { | |
3574 HeapRegion* r = _gc_alloc_region_list; | |
3575 assert(r->is_gc_alloc_region(), "Invariant."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3576 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3577 // newly allocated data in order to be able to apply deferred updates |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3578 // before the GC is done for verification purposes (i.e to allow |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3579 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3580 // collection. |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3581 r->ContiguousSpace::set_saved_mark(); |
342 | 3582 _gc_alloc_region_list = r->next_gc_alloc_region(); |
3583 r->set_next_gc_alloc_region(NULL); | |
3584 r->set_is_gc_alloc_region(false); | |
545 | 3585 if (r->is_survivor()) { |
3586 if (r->is_empty()) { | |
3587 r->set_not_young(); | |
3588 } else { | |
3589 _young_list->add_survivor_region(r); | |
3590 } | |
3591 } | |
342 | 3592 if (r->is_empty()) { |
3593 ++_free_regions; | |
3594 } | |
3595 } | |
3596 #ifdef G1_DEBUG | |
3597 FindGCAllocRegion fa; | |
3598 heap_region_iterate(&fa); | |
3599 #endif // G1_DEBUG | |
3600 } | |
3601 | |
3602 | |
3603 bool G1CollectedHeap::check_gc_alloc_regions() { | |
3604 // TODO: allocation regions check | |
3605 return true; | |
3606 } | |
3607 | |
3608 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 3609 // First, let's check that the GC alloc region list is empty (it should) |
3610 assert(_gc_alloc_region_list == NULL, "invariant"); | |
3611 | |
342 | 3612 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 3613 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3614 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
636 | 3615 |
342 | 3616 // Create new GC alloc regions. |
636 | 3617 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
3618 _retained_gc_alloc_regions[ap] = NULL; | |
3619 | |
3620 if (alloc_region != NULL) { | |
3621 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
3622 | |
3623 // let's make sure that the GC alloc region is not tagged as such | |
3624 // outside a GC operation | |
3625 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
3626 | |
3627 if (alloc_region->in_collection_set() || | |
3628 alloc_region->top() == alloc_region->end() || | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3629 alloc_region->top() == alloc_region->bottom() || |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3630 alloc_region->isHumongous()) { |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3631 // we will discard the current GC alloc region if |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3632 // * it's in the collection set (it can happen!), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3633 // * it's already full (no point in using it), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3634 // * it's empty (this means that it was emptied during |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3635 // a cleanup and it should be on the free list now), or |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3636 // * it's humongous (this means that it was emptied |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3637 // during a cleanup and was added to the free list, but |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3638 // has been subseqently used to allocate a humongous |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3639 // object that may be less than the region size). |
636 | 3640 |
3641 alloc_region = NULL; | |
3642 } | |
3643 } | |
3644 | |
3645 if (alloc_region == NULL) { | |
3646 // we will get a new GC alloc region | |
342 | 3647 alloc_region = newAllocRegionWithExpansion(ap, 0); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3648 } else { |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3649 // the region was retained from the last collection |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3650 ++_gc_alloc_region_counts[ap]; |
1388 | 3651 if (G1PrintHeapRegions) { |
3652 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
3653 "top "PTR_FORMAT, | |
3654 alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); | |
3655 } | |
342 | 3656 } |
636 | 3657 |
342 | 3658 if (alloc_region != NULL) { |
636 | 3659 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 3660 set_gc_alloc_region(ap, alloc_region); |
3661 } | |
636 | 3662 |
3663 assert(_gc_alloc_regions[ap] == NULL || | |
3664 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
3665 "the GC alloc region should be tagged as such"); | |
3666 assert(_gc_alloc_regions[ap] == NULL || | |
3667 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
3668 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 3669 } |
3670 // Set alternative regions for allocation purposes that have reached | |
636 | 3671 // their limit. |
342 | 3672 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
3673 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
3674 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
3675 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
3676 } | |
3677 } | |
3678 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3679 } | |
3680 | |
636 | 3681 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 3682 // We keep a separate list of all regions that have been alloc regions in |
636 | 3683 // the current collection pause. Forget that now. This method will |
3684 // untag the GC alloc regions and tear down the GC alloc region | |
3685 // list. It's desirable that no regions are tagged as GC alloc | |
3686 // outside GCs. | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3687 |
342 | 3688 forget_alloc_region_list(); |
3689 | |
3690 // The current alloc regions contain objs that have survived | |
3691 // collection. Make them no longer GC alloc regions. | |
3692 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3693 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 3694 _retained_gc_alloc_regions[ap] = NULL; |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3695 _gc_alloc_region_counts[ap] = 0; |
636 | 3696 |
3697 if (r != NULL) { | |
3698 // we retain nothing on _gc_alloc_regions between GCs | |
3699 set_gc_alloc_region(ap, NULL); | |
3700 | |
3701 if (r->is_empty()) { | |
3702 // we didn't actually allocate anything in it; let's just put | |
3703 // it on the free list | |
342 | 3704 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
3705 r->set_zero_fill_complete(); | |
3706 put_free_region_on_list_locked(r); | |
636 | 3707 } else if (_retain_gc_alloc_region[ap] && !totally) { |
3708 // retain it so that we can use it at the beginning of the next GC | |
3709 _retained_gc_alloc_regions[ap] = r; | |
342 | 3710 } |
3711 } | |
636 | 3712 } |
3713 } | |
3714 | |
3715 #ifndef PRODUCT | |
3716 // Useful for debugging | |
3717 | |
3718 void G1CollectedHeap::print_gc_alloc_regions() { | |
3719 gclog_or_tty->print_cr("GC alloc regions"); | |
3720 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3721 HeapRegion* r = _gc_alloc_regions[ap]; | |
3722 if (r == NULL) { | |
3723 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3724 } else { | |
3725 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3726 ap, r->bottom(), r->used()); | |
3727 } | |
3728 } | |
3729 } | |
3730 #endif // PRODUCT | |
342 | 3731 |
3732 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3733 _drain_in_progress = false; | |
3734 set_evac_failure_closure(cl); | |
3735 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3736 } | |
3737 | |
3738 void G1CollectedHeap::finalize_for_evac_failure() { | |
3739 assert(_evac_failure_scan_stack != NULL && | |
3740 _evac_failure_scan_stack->length() == 0, | |
3741 "Postcondition"); | |
3742 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3743 delete _evac_failure_scan_stack; |
342 | 3744 _evac_failure_scan_stack = NULL; |
3745 } | |
3746 | |
3747 | |
3748 | |
3749 // *** Sequential G1 Evacuation | |
3750 | |
3751 class G1IsAliveClosure: public BoolObjectClosure { | |
3752 G1CollectedHeap* _g1; | |
3753 public: | |
3754 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3755 void do_object(oop p) { assert(false, "Do not call."); } | |
3756 bool do_object_b(oop p) { | |
3757 // It is reachable if it is outside the collection set, or is inside | |
3758 // and forwarded. | |
3759 | |
3760 #ifdef G1_DEBUG | |
3761 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3762 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3763 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3764 #endif // G1_DEBUG | |
3765 | |
3766 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3767 } | |
3768 }; | |
3769 | |
3770 class G1KeepAliveClosure: public OopClosure { | |
3771 G1CollectedHeap* _g1; | |
3772 public: | |
3773 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3774 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3775 void do_oop( oop* p) { |
342 | 3776 oop obj = *p; |
3777 #ifdef G1_DEBUG | |
3778 if (PrintGC && Verbose) { | |
3779 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3780 p, (void*) obj, (void*) *p); | |
3781 } | |
3782 #endif // G1_DEBUG | |
3783 | |
3784 if (_g1->obj_in_cs(obj)) { | |
3785 assert( obj->is_forwarded(), "invariant" ); | |
3786 *p = obj->forwardee(); | |
3787 #ifdef G1_DEBUG | |
3788 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3789 (void*) obj, (void*) *p); | |
3790 #endif // G1_DEBUG | |
3791 } | |
3792 } | |
3793 }; | |
3794 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3795 class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3796 private: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3797 G1CollectedHeap* _g1; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3798 DirtyCardQueue *_dcq; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3799 CardTableModRefBS* _ct_bs; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3800 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3801 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3802 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3803 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3804 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3805 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3806 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3807 template <class T> void do_oop_work(T* p) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3808 assert(_from->is_in_reserved(p), "paranoia"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3809 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3810 !_from->is_survivor()) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3811 size_t card_index = _ct_bs->index_for(p); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3812 if (_ct_bs->mark_card_deferred(card_index)) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3813 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3814 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3815 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3816 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3817 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3818 |
342 | 3819 class RemoveSelfPointerClosure: public ObjectClosure { |
3820 private: | |
3821 G1CollectedHeap* _g1; | |
3822 ConcurrentMark* _cm; | |
3823 HeapRegion* _hr; | |
3824 size_t _prev_marked_bytes; | |
3825 size_t _next_marked_bytes; | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3826 OopsInHeapRegionClosure *_cl; |
342 | 3827 public: |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3828 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3829 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3830 _next_marked_bytes(0), _cl(cl) {} |
342 | 3831 |
3832 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3833 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3834 | |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3835 // The original idea here was to coalesce evacuated and dead objects. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3836 // However that caused complications with the block offset table (BOT). |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3837 // In particular if there were two TLABs, one of them partially refined. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3838 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3839 // The BOT entries of the unrefined part of TLAB_2 point to the start |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3840 // of TLAB_2. If the last object of the TLAB_1 and the first object |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3841 // of TLAB_2 are coalesced, then the cards of the unrefined part |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3842 // would point into middle of the filler object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3843 // |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3844 // The current approach is to not coalesce and leave the BOT contents intact. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3845 void do_object(oop obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3846 if (obj->is_forwarded() && obj->forwardee() == obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3847 // The object failed to move. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3848 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3849 _cm->markPrev(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3850 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3851 _prev_marked_bytes += (obj->size() * HeapWordSize); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3852 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3853 _cm->markAndGrayObjectIfNecessary(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3854 } |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3855 obj->set_mark(markOopDesc::prototype()); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3856 // While we were processing RSet buffers during the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3857 // collection, we actually didn't scan any cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3858 // collection set, since we didn't want to update remebered |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3859 // sets with entries that point into the collection set, given |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3860 // that live objects fromthe collection set are about to move |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3861 // and such entries will be stale very soon. This change also |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3862 // dealt with a reliability issue which involved scanning a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3863 // card in the collection set and coming across an array that |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3864 // was being chunked and looking malformed. The problem is |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3865 // that, if evacuation fails, we might have remembered set |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3866 // entries missing given that we skipped cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3867 // collection set. So, we'll recreate such entries now. |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3868 obj->oop_iterate(_cl); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3869 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3870 } else { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3871 // The object has been either evacuated or is dead. Fill it with a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3872 // dummy object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3873 MemRegion mr((HeapWord*)obj, obj->size()); |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3874 CollectedHeap::fill_with_object(mr); |
342 | 3875 _cm->clearRangeBothMaps(mr); |
3876 } | |
3877 } | |
3878 }; | |
3879 | |
3880 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
1705 | 3881 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3882 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3883 UpdateRSetDeferred deferred_update(_g1h, &dcq); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3884 OopsInHeapRegionClosure *cl; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3885 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3886 cl = &deferred_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3887 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3888 cl = &immediate_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3889 } |
342 | 3890 HeapRegion* cur = g1_policy()->collection_set(); |
3891 while (cur != NULL) { | |
3892 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3893 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3894 RemoveSelfPointerClosure rspc(_g1h, cl); |
342 | 3895 if (cur->evacuation_failed()) { |
3896 assert(cur->in_collection_set(), "bad CS"); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3897 cl->set_region(cur); |
342 | 3898 cur->object_iterate(&rspc); |
3899 | |
3900 // A number of manipulations to make the TAMS be the current top, | |
3901 // and the marked bytes be the ones observed in the iteration. | |
3902 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
3903 // The comments below are the postconditions achieved by the | |
3904 // calls. Note especially the last such condition, which says that | |
3905 // the count of marked bytes has been properly restored. | |
3906 cur->note_start_of_marking(false); | |
3907 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3908 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
3909 // _next_marked_bytes == prev_marked_bytes. | |
3910 cur->note_end_of_marking(); | |
3911 // _prev_top_at_mark_start == top(), | |
3912 // _prev_marked_bytes == prev_marked_bytes | |
3913 } | |
3914 // If there is no mark in progress, we modified the _next variables | |
3915 // above needlessly, but harmlessly. | |
3916 if (_g1h->mark_in_progress()) { | |
3917 cur->note_start_of_marking(false); | |
3918 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3919 // _next_marked_bytes == next_marked_bytes. | |
3920 } | |
3921 | |
3922 // Now make sure the region has the right index in the sorted array. | |
3923 g1_policy()->note_change_in_marked_bytes(cur); | |
3924 } | |
3925 cur = cur->next_in_collection_set(); | |
3926 } | |
3927 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3928 | |
3929 // Now restore saved marks, if any. | |
3930 if (_objs_with_preserved_marks != NULL) { | |
3931 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3932 assert(_objs_with_preserved_marks->length() == | |
3933 _preserved_marks_of_objs->length(), "Both or none."); | |
3934 guarantee(_objs_with_preserved_marks->length() == | |
3935 _preserved_marks_of_objs->length(), "Both or none."); | |
3936 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3937 oop obj = _objs_with_preserved_marks->at(i); | |
3938 markOop m = _preserved_marks_of_objs->at(i); | |
3939 obj->set_mark(m); | |
3940 } | |
3941 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3942 delete _objs_with_preserved_marks; | |
3943 delete _preserved_marks_of_objs; | |
3944 _objs_with_preserved_marks = NULL; | |
3945 _preserved_marks_of_objs = NULL; | |
3946 } | |
3947 } | |
3948 | |
3949 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
3950 _evac_failure_scan_stack->push(obj); | |
3951 } | |
3952 | |
3953 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3954 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3955 | |
3956 while (_evac_failure_scan_stack->length() > 0) { | |
3957 oop obj = _evac_failure_scan_stack->pop(); | |
3958 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3959 obj->oop_iterate_backwards(_evac_failure_closure); | |
3960 } | |
3961 } | |
3962 | |
3963 void G1CollectedHeap::handle_evacuation_failure(oop old) { | |
3964 markOop m = old->mark(); | |
3965 // forward to self | |
3966 assert(!old->is_forwarded(), "precondition"); | |
3967 | |
3968 old->forward_to(old); | |
3969 handle_evacuation_failure_common(old, m); | |
3970 } | |
3971 | |
3972 oop | |
3973 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
3974 oop old) { | |
3975 markOop m = old->mark(); | |
3976 oop forward_ptr = old->forward_to_atomic(old); | |
3977 if (forward_ptr == NULL) { | |
3978 // Forward-to-self succeeded. | |
3979 if (_evac_failure_closure != cl) { | |
3980 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
3981 assert(!_drain_in_progress, | |
3982 "Should only be true while someone holds the lock."); | |
3983 // Set the global evac-failure closure to the current thread's. | |
3984 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
3985 set_evac_failure_closure(cl); | |
3986 // Now do the common part. | |
3987 handle_evacuation_failure_common(old, m); | |
3988 // Reset to NULL. | |
3989 set_evac_failure_closure(NULL); | |
3990 } else { | |
3991 // The lock is already held, and this is recursive. | |
3992 assert(_drain_in_progress, "This should only be the recursive case."); | |
3993 handle_evacuation_failure_common(old, m); | |
3994 } | |
3995 return old; | |
3996 } else { | |
3997 // Someone else had a place to copy it. | |
3998 return forward_ptr; | |
3999 } | |
4000 } | |
4001 | |
4002 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
4003 set_evacuation_failed(true); | |
4004 | |
4005 preserve_mark_if_necessary(old, m); | |
4006 | |
4007 HeapRegion* r = heap_region_containing(old); | |
4008 if (!r->evacuation_failed()) { | |
4009 r->set_evacuation_failed(true); | |
1282 | 4010 if (G1PrintHeapRegions) { |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
4011 gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " |
342 | 4012 "["PTR_FORMAT","PTR_FORMAT")\n", |
4013 r, r->bottom(), r->end()); | |
4014 } | |
4015 } | |
4016 | |
4017 push_on_evac_failure_scan_stack(old); | |
4018 | |
4019 if (!_drain_in_progress) { | |
4020 // prevent recursion in copy_to_survivor_space() | |
4021 _drain_in_progress = true; | |
4022 drain_evac_failure_scan_stack(); | |
4023 _drain_in_progress = false; | |
4024 } | |
4025 } | |
4026 | |
4027 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
4028 if (m != markOopDesc::prototype()) { | |
4029 if (_objs_with_preserved_marks == NULL) { | |
4030 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
4031 _objs_with_preserved_marks = | |
4032 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
4033 _preserved_marks_of_objs = | |
4034 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
4035 } | |
4036 _objs_with_preserved_marks->push(obj); | |
4037 _preserved_marks_of_objs->push(m); | |
4038 } | |
4039 } | |
4040 | |
4041 // *** Parallel G1 Evacuation | |
4042 | |
4043 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
4044 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4045 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4046 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4047 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4048 |
342 | 4049 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
4050 // let the caller handle alloc failure | |
4051 if (alloc_region == NULL) return NULL; | |
4052 | |
4053 HeapWord* block = alloc_region->par_allocate(word_size); | |
4054 if (block == NULL) { | |
4055 MutexLockerEx x(par_alloc_during_gc_lock(), | |
4056 Mutex::_no_safepoint_check_flag); | |
4057 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
4058 } | |
4059 return block; | |
4060 } | |
4061 | |
545 | 4062 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
4063 bool par) { | |
4064 // Another thread might have obtained alloc_region for the given | |
4065 // purpose, and might be attempting to allocate in it, and might | |
4066 // succeed. Therefore, we can't do the "finalization" stuff on the | |
4067 // region below until we're sure the last allocation has happened. | |
4068 // We ensure this by allocating the remaining space with a garbage | |
4069 // object. | |
4070 if (par) par_allocate_remaining_space(alloc_region); | |
4071 // Now we can do the post-GC stuff on the region. | |
4072 alloc_region->note_end_of_copying(); | |
4073 g1_policy()->record_after_bytes(alloc_region->used()); | |
4074 } | |
4075 | |
342 | 4076 HeapWord* |
4077 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
4078 HeapRegion* alloc_region, | |
4079 bool par, | |
4080 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4081 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4082 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4083 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4084 |
342 | 4085 HeapWord* block = NULL; |
4086 // In the parallel case, a previous thread to obtain the lock may have | |
4087 // already assigned a new gc_alloc_region. | |
4088 if (alloc_region != _gc_alloc_regions[purpose]) { | |
4089 assert(par, "But should only happen in parallel case."); | |
4090 alloc_region = _gc_alloc_regions[purpose]; | |
4091 if (alloc_region == NULL) return NULL; | |
4092 block = alloc_region->par_allocate(word_size); | |
4093 if (block != NULL) return block; | |
4094 // Otherwise, continue; this new region is empty, too. | |
4095 } | |
4096 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 4097 retire_alloc_region(alloc_region, par); |
342 | 4098 |
4099 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
4100 // Cannot allocate more regions for the given purpose. | |
4101 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
4102 // Is there an alternative? | |
4103 if (purpose != alt_purpose) { | |
4104 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
4105 // Has not the alternative region been aliased? | |
545 | 4106 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 4107 // Try to allocate in the alternative region. |
4108 if (par) { | |
4109 block = alt_region->par_allocate(word_size); | |
4110 } else { | |
4111 block = alt_region->allocate(word_size); | |
4112 } | |
4113 // Make an alias. | |
4114 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 4115 if (block != NULL) { |
4116 return block; | |
4117 } | |
4118 retire_alloc_region(alt_region, par); | |
342 | 4119 } |
4120 // Both the allocation region and the alternative one are full | |
4121 // and aliased, replace them with a new allocation region. | |
4122 purpose = alt_purpose; | |
4123 } else { | |
4124 set_gc_alloc_region(purpose, NULL); | |
4125 return NULL; | |
4126 } | |
4127 } | |
4128 | |
4129 // Now allocate a new region for allocation. | |
4130 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); | |
4131 | |
4132 // let the caller handle alloc failure | |
4133 if (alloc_region != NULL) { | |
4134 | |
4135 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
4136 assert(alloc_region->saved_mark_at_top(), | |
4137 "Mark should have been saved already."); | |
4138 // We used to assert that the region was zero-filled here, but no | |
4139 // longer. | |
4140 | |
4141 // This must be done last: once it's installed, other regions may | |
4142 // allocate in it (without holding the lock.) | |
4143 set_gc_alloc_region(purpose, alloc_region); | |
4144 | |
4145 if (par) { | |
4146 block = alloc_region->par_allocate(word_size); | |
4147 } else { | |
4148 block = alloc_region->allocate(word_size); | |
4149 } | |
4150 // Caller handles alloc failure. | |
4151 } else { | |
4152 // This sets other apis using the same old alloc region to NULL, also. | |
4153 set_gc_alloc_region(purpose, NULL); | |
4154 } | |
4155 return block; // May be NULL. | |
4156 } | |
4157 | |
4158 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
4159 HeapWord* block = NULL; | |
4160 size_t free_words; | |
4161 do { | |
4162 free_words = r->free()/HeapWordSize; | |
4163 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
4164 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 4165 // Otherwise, try to claim it. |
4166 block = r->par_allocate(free_words); | |
4167 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
4168 fill_with_object(block, free_words); |
342 | 4169 } |
4170 | |
4171 #ifndef PRODUCT | |
4172 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
4173 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
4174 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
4175 return true; | |
4176 } | |
4177 #endif // PRODUCT | |
4178 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4179 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4180 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4181 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4182 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4183 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4184 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4185 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4186 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4187 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4188 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4189 _age_table(false), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4190 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4191 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4192 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4193 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4194 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4195 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4196 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4197 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4198 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4199 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4200 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4201 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4202 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4203 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4204 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4205 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4206 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4207 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4208 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4209 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4210 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4211 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4212 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4213 } |
342 | 4214 |
1709 | 4215 void |
4216 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) | |
4217 { | |
4218 st->print_raw_cr("GC Termination Stats"); | |
4219 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" | |
4220 " ------waste (KiB)------"); | |
4221 st->print_raw_cr("thr ms ms % ms % attempts" | |
4222 " total alloc undo"); | |
4223 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" | |
4224 " ------- ------- -------"); | |
4225 } | |
4226 | |
4227 void | |
4228 G1ParScanThreadState::print_termination_stats(int i, | |
4229 outputStream* const st) const | |
4230 { | |
4231 const double elapsed_ms = elapsed_time() * 1000.0; | |
4232 const double s_roots_ms = strong_roots_time() * 1000.0; | |
4233 const double term_ms = term_time() * 1000.0; | |
4234 st->print_cr("%3d %9.2f %9.2f %6.2f " | |
4235 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " | |
4236 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), | |
4237 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, | |
4238 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), | |
4239 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, | |
4240 alloc_buffer_waste() * HeapWordSize / K, | |
4241 undo_waste() * HeapWordSize / K); | |
4242 } | |
4243 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4244 #ifdef ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4245 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4246 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4247 assert(UseCompressedOops, "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4248 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4249 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4250 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4251 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4252 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4253 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4254 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4255 bool G1ParScanThreadState::verify_ref(oop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4256 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4257 if (has_partial_array_mask(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4258 // Must be in the collection set--it's already been copied. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4259 oop p = clear_partial_array_mask(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4260 assert(_g1h->obj_in_cs(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4261 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4262 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4263 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4264 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4265 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4266 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4267 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4268 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4269 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4270 bool G1ParScanThreadState::verify_task(StarTask ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4271 if (ref.is_narrow()) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4272 return verify_ref((narrowOop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4273 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4274 return verify_ref((oop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4275 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4276 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4277 #endif // ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4278 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4279 void G1ParScanThreadState::trim_queue() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4280 StarTask ref; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4281 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4282 // Drain the overflow stack first, so other threads can steal. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4283 while (refs()->pop_overflow(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4284 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4285 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4286 while (refs()->pop_local(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4287 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4288 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4289 } while (!refs()->is_empty()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4290 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4291 |
342 | 4292 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
4293 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
4294 _par_scan_state(par_scan_state) { } | |
4295 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4296 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 4297 // This is called _after_ do_oop_work has been called, hence after |
4298 // the object has been relocated to its new location and *p points | |
4299 // to its new location. | |
4300 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4301 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4302 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4303 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4304 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), |
342 | 4305 "shouldn't still be in the CSet if evacuation didn't fail."); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4306 HeapWord* addr = (HeapWord*)obj; |
342 | 4307 if (_g1->is_in_g1_reserved(addr)) |
4308 _cm->grayRoot(oop(addr)); | |
4309 } | |
4310 } | |
4311 | |
4312 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
4313 size_t word_sz = old->size(); | |
4314 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
4315 // +1 to make the -1 indexes valid... | |
4316 int young_index = from_region->young_index_in_cset()+1; | |
4317 assert( (from_region->is_young() && young_index > 0) || | |
4318 (!from_region->is_young() && young_index == 0), "invariant" ); | |
4319 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
4320 markOop m = old->mark(); | |
545 | 4321 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
4322 : m->age(); | |
4323 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 4324 word_sz); |
4325 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
4326 oop obj = oop(obj_ptr); | |
4327 | |
4328 if (obj_ptr == NULL) { | |
4329 // This will either forward-to-self, or detect that someone else has | |
4330 // installed a forwarding pointer. | |
4331 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4332 return _g1->handle_evacuation_failure_par(cl, old); | |
4333 } | |
4334 | |
526 | 4335 // We're going to allocate linearly, so might as well prefetch ahead. |
4336 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
4337 | |
342 | 4338 oop forward_ptr = old->forward_to_atomic(obj); |
4339 if (forward_ptr == NULL) { | |
4340 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 4341 if (g1p->track_object_age(alloc_purpose)) { |
4342 // We could simply do obj->incr_age(). However, this causes a | |
4343 // performance issue. obj->incr_age() will first check whether | |
4344 // the object has a displaced mark by checking its mark word; | |
4345 // getting the mark word from the new location of the object | |
4346 // stalls. So, given that we already have the mark word and we | |
4347 // are about to install it anyway, it's better to increase the | |
4348 // age on the mark word, when the object does not have a | |
4349 // displaced mark word. We're not expecting many objects to have | |
4350 // a displaced marked word, so that case is not optimized | |
4351 // further (it could be...) and we simply call obj->incr_age(). | |
4352 | |
4353 if (m->has_displaced_mark_helper()) { | |
4354 // in this case, we have to install the mark word first, | |
4355 // otherwise obj looks to be forwarded (the old mark word, | |
4356 // which contains the forward pointer, was copied) | |
4357 obj->set_mark(m); | |
4358 obj->incr_age(); | |
4359 } else { | |
4360 m = m->incr_age(); | |
545 | 4361 obj->set_mark(m); |
526 | 4362 } |
545 | 4363 _par_scan_state->age_table()->add(obj, word_sz); |
4364 } else { | |
4365 obj->set_mark(m); | |
526 | 4366 } |
4367 | |
342 | 4368 // preserve "next" mark bit |
4369 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
4370 if (!use_local_bitmaps || | |
4371 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
4372 // if we couldn't mark it on the local bitmap (this happens when | |
4373 // the object was not allocated in the GCLab), we have to bite | |
4374 // the bullet and do the standard parallel mark | |
4375 _cm->markAndGrayObjectIfNecessary(obj); | |
4376 } | |
4377 #if 1 | |
4378 if (_g1->isMarkedNext(old)) { | |
4379 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
4380 } | |
4381 #endif | |
4382 } | |
4383 | |
4384 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
4385 surv_young_words[young_index] += word_sz; | |
4386 | |
4387 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
4388 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4389 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4390 _par_scan_state->push_on_queue(old_p); |
342 | 4391 } else { |
526 | 4392 // No point in using the slower heap_region_containing() method, |
4393 // given that we know obj is in the heap. | |
4394 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 4395 obj->oop_iterate_backwards(_scanner); |
4396 } | |
4397 } else { | |
4398 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
4399 obj = forward_ptr; | |
4400 } | |
4401 return obj; | |
4402 } | |
4403 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4404 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4405 template <class T> |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4406 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4407 ::do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4408 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 4409 assert(barrier != G1BarrierRS || obj != NULL, |
4410 "Precondition: G1BarrierRS implies obj is nonNull"); | |
4411 | |
526 | 4412 // here the null check is implicit in the cset_fast_test() test |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4413 if (_g1->in_cset_fast_test(obj)) { |
342 | 4414 #if G1_REM_SET_LOGGING |
526 | 4415 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
4416 "into CS.", p, (void*) obj); | |
342 | 4417 #endif |
526 | 4418 if (obj->is_forwarded()) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4419 oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
526 | 4420 } else { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4421 oop copy_oop = copy_to_survivor_space(obj); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4422 oopDesc::encode_store_heap_oop(p, copy_oop); |
342 | 4423 } |
526 | 4424 // When scanning the RS, we only care about objs in CS. |
4425 if (barrier == G1BarrierRS) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4426 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 4427 } |
526 | 4428 } |
4429 | |
4430 if (barrier == G1BarrierEvac && obj != NULL) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4431 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 4432 } |
4433 | |
4434 if (do_gen_barrier && obj != NULL) { | |
4435 par_do_barrier(p); | |
4436 } | |
4437 } | |
4438 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4439 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4440 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4441 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4442 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
526 | 4443 assert(has_partial_array_mask(p), "invariant"); |
4444 oop old = clear_partial_array_mask(p); | |
342 | 4445 assert(old->is_objArray(), "must be obj array"); |
4446 assert(old->is_forwarded(), "must be forwarded"); | |
4447 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
4448 | |
4449 objArrayOop obj = objArrayOop(old->forwardee()); | |
4450 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
4451 // Process ParGCArrayScanChunk elements now | |
4452 // and push the remainder back onto queue | |
4453 int start = arrayOop(old)->length(); | |
4454 int end = obj->length(); | |
4455 int remainder = end - start; | |
4456 assert(start <= end, "just checking"); | |
4457 if (remainder > 2 * ParGCArrayScanChunk) { | |
4458 // Test above combines last partial chunk with a full chunk | |
4459 end = start + ParGCArrayScanChunk; | |
4460 arrayOop(old)->set_length(end); | |
4461 // Push remainder. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4462 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4463 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4464 _par_scan_state->push_on_queue(old_p); |
342 | 4465 } else { |
4466 // Restore length so that the heap remains parsable in | |
4467 // case of evacuation failure. | |
4468 arrayOop(old)->set_length(end); | |
4469 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4470 _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
342 | 4471 // process our set of indices (include header in first chunk) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4472 obj->oop_iterate_range(&_scanner, start, end); |
342 | 4473 } |
4474 | |
4475 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
4476 protected: | |
4477 G1CollectedHeap* _g1h; | |
4478 G1ParScanThreadState* _par_scan_state; | |
4479 RefToScanQueueSet* _queues; | |
4480 ParallelTaskTerminator* _terminator; | |
4481 | |
4482 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
4483 RefToScanQueueSet* queues() { return _queues; } | |
4484 ParallelTaskTerminator* terminator() { return _terminator; } | |
4485 | |
4486 public: | |
4487 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
4488 G1ParScanThreadState* par_scan_state, | |
4489 RefToScanQueueSet* queues, | |
4490 ParallelTaskTerminator* terminator) | |
4491 : _g1h(g1h), _par_scan_state(par_scan_state), | |
4492 _queues(queues), _terminator(terminator) {} | |
4493 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4494 void do_void(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4495 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4496 private: |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4497 inline bool offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4498 }; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4499 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4500 bool G1ParEvacuateFollowersClosure::offer_termination() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4501 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4502 pss->start_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4503 const bool res = terminator()->offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4504 pss->end_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4505 return res; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4506 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4507 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4508 void G1ParEvacuateFollowersClosure::do_void() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4509 StarTask stolen_task; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4510 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4511 pss->trim_queue(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4512 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4513 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4514 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4515 assert(pss->verify_task(stolen_task), "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4516 if (stolen_task.is_narrow()) { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4517 pss->deal_with_reference((narrowOop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4518 } else { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4519 pss->deal_with_reference((oop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4520 } |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4521 |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4522 // We've just processed a reference and we might have made |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4523 // available new entries on the queues. So we have to make sure |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4524 // we drain the queues as necessary. |
342 | 4525 pss->trim_queue(); |
4526 } | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4527 } while (!offer_termination()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4528 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4529 pss->retire_alloc_buffers(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4530 } |
342 | 4531 |
4532 class G1ParTask : public AbstractGangTask { | |
4533 protected: | |
4534 G1CollectedHeap* _g1h; | |
4535 RefToScanQueueSet *_queues; | |
4536 ParallelTaskTerminator _terminator; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4537 int _n_workers; |
342 | 4538 |
4539 Mutex _stats_lock; | |
4540 Mutex* stats_lock() { return &_stats_lock; } | |
4541 | |
4542 size_t getNCards() { | |
4543 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
4544 / G1BlockOffsetSharedArray::N_bytes; | |
4545 } | |
4546 | |
4547 public: | |
4548 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
4549 : AbstractGangTask("G1 collection"), | |
4550 _g1h(g1h), | |
4551 _queues(task_queues), | |
4552 _terminator(workers, _queues), | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4553 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4554 _n_workers(workers) |
342 | 4555 {} |
4556 | |
4557 RefToScanQueueSet* queues() { return _queues; } | |
4558 | |
4559 RefToScanQueue *work_queue(int i) { | |
4560 return queues()->queue(i); | |
4561 } | |
4562 | |
4563 void work(int i) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4564 if (i >= _n_workers) return; // no work needed this round |
1611 | 4565 |
4566 double start_time_ms = os::elapsedTime() * 1000.0; | |
4567 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); | |
4568 | |
342 | 4569 ResourceMark rm; |
4570 HandleMark hm; | |
4571 | |
526 | 4572 G1ParScanThreadState pss(_g1h, i); |
4573 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
4574 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
4575 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 4576 |
4577 pss.set_evac_closure(&scan_evac_cl); | |
4578 pss.set_evac_failure_closure(&evac_failure_cl); | |
4579 pss.set_partial_scan_closure(&partial_scan_cl); | |
4580 | |
4581 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
4582 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
4583 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4584 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4585 |
342 | 4586 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
4587 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
4588 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
4589 | |
4590 OopsInHeapRegionClosure *scan_root_cl; | |
4591 OopsInHeapRegionClosure *scan_perm_cl; | |
4592 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
4593 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
342 | 4594 scan_root_cl = &scan_mark_root_cl; |
4595 scan_perm_cl = &scan_mark_perm_cl; | |
4596 } else { | |
4597 scan_root_cl = &only_scan_root_cl; | |
4598 scan_perm_cl = &only_scan_perm_cl; | |
4599 } | |
4600 | |
4601 pss.start_strong_roots(); | |
4602 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4603 SharedHeap::SO_AllClasses, | |
4604 scan_root_cl, | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4605 &push_heap_rs_cl, |
342 | 4606 scan_perm_cl, |
4607 i); | |
4608 pss.end_strong_roots(); | |
4609 { | |
4610 double start = os::elapsedTime(); | |
4611 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4612 evac.do_void(); | |
4613 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4614 double term_ms = pss.term_time()*1000.0; | |
4615 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
1611 | 4616 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); |
342 | 4617 } |
1282 | 4618 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
342 | 4619 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
4620 | |
4621 // Clean up any par-expanded rem sets. | |
4622 HeapRegionRemSet::par_cleanup(); | |
4623 | |
4624 if (ParallelGCVerbose) { | |
1709 | 4625 MutexLocker x(stats_lock()); |
4626 pss.print_termination_stats(i); | |
342 | 4627 } |
4628 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4629 assert(pss.refs()->is_empty(), "should be empty"); |
1611 | 4630 double end_time_ms = os::elapsedTime() * 1000.0; |
4631 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); | |
342 | 4632 } |
4633 }; | |
4634 | |
4635 // *** Common G1 Evacuation Stuff | |
4636 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4637 // This method is run in a GC worker. |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4638 |
342 | 4639 void |
4640 G1CollectedHeap:: | |
4641 g1_process_strong_roots(bool collecting_perm_gen, | |
4642 SharedHeap::ScanningOption so, | |
4643 OopClosure* scan_non_heap_roots, | |
4644 OopsInHeapRegionClosure* scan_rs, | |
4645 OopsInGenClosure* scan_perm, | |
4646 int worker_i) { | |
4647 // First scan the strong roots, including the perm gen. | |
4648 double ext_roots_start = os::elapsedTime(); | |
4649 double closure_app_time_sec = 0.0; | |
4650 | |
4651 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4652 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4653 buf_scan_perm.set_generation(perm_gen()); | |
4654 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4655 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4656 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4657 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4658 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4659 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4660 collecting_perm_gen, so, |
342 | 4661 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4662 &eager_scan_code_roots, |
342 | 4663 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4664 |
342 | 4665 // Finish up any enqueued closure apps. |
4666 buf_scan_non_heap_roots.done(); | |
4667 buf_scan_perm.done(); | |
4668 double ext_roots_end = os::elapsedTime(); | |
4669 g1_policy()->reset_obj_copy_time(worker_i); | |
4670 double obj_copy_time_sec = | |
4671 buf_scan_non_heap_roots.closure_app_seconds() + | |
4672 buf_scan_perm.closure_app_seconds(); | |
4673 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4674 double ext_root_time_ms = | |
4675 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4676 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4677 | |
4678 // Scan strong roots in mark stack. | |
4679 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4680 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4681 } | |
4682 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4683 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4684 | |
4685 // XXX What should this be doing in the parallel case? | |
4686 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4687 // Now scan the complement of the collection set. | |
4688 if (scan_rs != NULL) { | |
4689 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4690 } | |
4691 // Finish with the ref_processor roots. | |
4692 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4693 // We need to treat the discovered reference lists as roots and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4694 // keep entries (which are added by the marking threads) on them |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4695 // live until they can be processed at the end of marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4696 ref_processor()->weak_oops_do(scan_non_heap_roots); |
342 | 4697 ref_processor()->oops_do(scan_non_heap_roots); |
4698 } | |
4699 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4700 _process_strong_tasks->all_tasks_completed(); | |
4701 } | |
4702 | |
4703 void | |
4704 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4705 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4706 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4707 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4708 } |
4709 | |
4710 | |
4711 class SaveMarksClosure: public HeapRegionClosure { | |
4712 public: | |
4713 bool doHeapRegion(HeapRegion* r) { | |
4714 r->save_marks(); | |
4715 return false; | |
4716 } | |
4717 }; | |
4718 | |
4719 void G1CollectedHeap::save_marks() { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4720 if (!CollectedHeap::use_parallel_gc_threads()) { |
342 | 4721 SaveMarksClosure sm; |
4722 heap_region_iterate(&sm); | |
4723 } | |
4724 // We do this even in the parallel case | |
4725 perm_gen()->save_marks(); | |
4726 } | |
4727 | |
4728 void G1CollectedHeap::evacuate_collection_set() { | |
4729 set_evacuation_failed(false); | |
4730 | |
4731 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4732 concurrent_g1_refine()->set_use_cache(false); | |
889 | 4733 concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
4734 | |
342 | 4735 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
4736 set_par_threads(n_workers); | |
4737 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4738 | |
4739 init_for_evac_failure(NULL); | |
4740 | |
4741 rem_set()->prepare_for_younger_refs_iterate(true); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4742 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4743 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
342 | 4744 double start_par = os::elapsedTime(); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4745 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 4746 // The individual threads will set their evac-failure closures. |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4747 StrongRootsScope srs(this); |
1709 | 4748 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); |
342 | 4749 workers()->run_task(&g1_par_task); |
4750 } else { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4751 StrongRootsScope srs(this); |
342 | 4752 g1_par_task.work(0); |
4753 } | |
4754 | |
4755 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4756 g1_policy()->record_par_time(par_time); | |
4757 set_par_threads(0); | |
4758 // Is this the right thing to do here? We don't save marks | |
4759 // on individual heap regions when we allocate from | |
4760 // them in parallel, so this seems like the correct place for this. | |
545 | 4761 retire_all_alloc_regions(); |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4762 |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4763 // Weak root processing. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4764 // Note: when JSR 292 is enabled and code blobs can contain |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4765 // non-perm oops then we will need to process the code blobs |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4766 // here too. |
342 | 4767 { |
4768 G1IsAliveClosure is_alive(this); | |
4769 G1KeepAliveClosure keep_alive(this); | |
4770 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4771 } | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4772 release_gc_alloc_regions(false /* totally */); |
342 | 4773 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4774 |
889 | 4775 concurrent_g1_refine()->clear_hot_cache(); |
342 | 4776 concurrent_g1_refine()->set_use_cache(true); |
4777 | |
4778 finalize_for_evac_failure(); | |
4779 | |
4780 // Must do this before removing self-forwarding pointers, which clears | |
4781 // the per-region evac-failure flags. | |
4782 concurrent_mark()->complete_marking_in_collection_set(); | |
4783 | |
4784 if (evacuation_failed()) { | |
4785 remove_self_forwarding_pointers(); | |
4786 if (PrintGCDetails) { | |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
4787 gclog_or_tty->print(" (to-space overflow)"); |
342 | 4788 } else if (PrintGC) { |
4789 gclog_or_tty->print("--"); | |
4790 } | |
4791 } | |
4792 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4793 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4794 RedirtyLoggedCardTableEntryFastClosure redirty; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4795 dirty_card_queue_set().set_closure(&redirty); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4796 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
1111 | 4797 |
4798 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); | |
4799 dcq.merge_bufferlists(&dirty_card_queue_set()); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4800 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4801 } |
342 | 4802 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
4803 } | |
4804 | |
4805 void G1CollectedHeap::free_region(HeapRegion* hr) { | |
4806 size_t pre_used = 0; | |
4807 size_t cleared_h_regions = 0; | |
4808 size_t freed_regions = 0; | |
4809 UncleanRegionList local_list; | |
4810 | |
4811 HeapWord* start = hr->bottom(); | |
4812 HeapWord* end = hr->prev_top_at_mark_start(); | |
4813 size_t used_bytes = hr->used(); | |
4814 size_t live_bytes = hr->max_live_bytes(); | |
4815 if (used_bytes > 0) { | |
4816 guarantee( live_bytes <= used_bytes, "invariant" ); | |
4817 } else { | |
4818 guarantee( live_bytes == 0, "invariant" ); | |
4819 } | |
4820 | |
4821 size_t garbage_bytes = used_bytes - live_bytes; | |
4822 if (garbage_bytes > 0) | |
4823 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); | |
4824 | |
4825 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, | |
4826 &local_list); | |
4827 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
4828 &local_list); | |
4829 } | |
4830 | |
4831 void | |
4832 G1CollectedHeap::free_region_work(HeapRegion* hr, | |
4833 size_t& pre_used, | |
4834 size_t& cleared_h_regions, | |
4835 size_t& freed_regions, | |
4836 UncleanRegionList* list, | |
4837 bool par) { | |
4838 pre_used += hr->used(); | |
4839 if (hr->isHumongous()) { | |
4840 assert(hr->startsHumongous(), | |
4841 "Only the start of a humongous region should be freed."); | |
4842 int ind = _hrs->find(hr); | |
4843 assert(ind != -1, "Should have an index."); | |
4844 // Clear the start region. | |
4845 hr->hr_clear(par, true /*clear_space*/); | |
4846 list->insert_before_head(hr); | |
4847 cleared_h_regions++; | |
4848 freed_regions++; | |
4849 // Clear any continued regions. | |
4850 ind++; | |
4851 while ((size_t)ind < n_regions()) { | |
4852 HeapRegion* hrc = _hrs->at(ind); | |
4853 if (!hrc->continuesHumongous()) break; | |
4854 // Otherwise, does continue the H region. | |
4855 assert(hrc->humongous_start_region() == hr, "Huh?"); | |
4856 hrc->hr_clear(par, true /*clear_space*/); | |
4857 cleared_h_regions++; | |
4858 freed_regions++; | |
4859 list->insert_before_head(hrc); | |
4860 ind++; | |
4861 } | |
4862 } else { | |
4863 hr->hr_clear(par, true /*clear_space*/); | |
4864 list->insert_before_head(hr); | |
4865 freed_regions++; | |
4866 // If we're using clear2, this should not be enabled. | |
4867 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); | |
4868 } | |
4869 } | |
4870 | |
4871 void G1CollectedHeap::finish_free_region_work(size_t pre_used, | |
4872 size_t cleared_h_regions, | |
4873 size_t freed_regions, | |
4874 UncleanRegionList* list) { | |
4875 if (list != NULL && list->sz() > 0) { | |
4876 prepend_region_list_on_unclean_list(list); | |
4877 } | |
4878 // Acquire a lock, if we're parallel, to update possibly-shared | |
4879 // variables. | |
4880 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; | |
4881 { | |
4882 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
4883 _summary_bytes_used -= pre_used; | |
4884 _num_humongous_regions -= (int) cleared_h_regions; | |
4885 _free_regions += freed_regions; | |
4886 } | |
4887 } | |
4888 | |
4889 | |
4890 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4891 while (list != NULL) { | |
4892 guarantee( list->is_young(), "invariant" ); | |
4893 | |
4894 HeapWord* bottom = list->bottom(); | |
4895 HeapWord* end = list->end(); | |
4896 MemRegion mr(bottom, end); | |
4897 ct_bs->dirty(mr); | |
4898 | |
4899 list = list->get_next_young_region(); | |
4900 } | |
4901 } | |
4902 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4903 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4904 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4905 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4906 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4907 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4908 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4909 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4910 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4911 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4912 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4913 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4914 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4915 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4916 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4917 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4918 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4919 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4920 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4921 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4922 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4923 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4924 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4925 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4926 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4927 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4928 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4929 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4930 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4931 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4932 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4933 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4934 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4935 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4936 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4937 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4938 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4939 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4940 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4941 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4942 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4943 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4944 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4945 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4946 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4947 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4948 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4949 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4950 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4951 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4952 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4953 class G1VerifyCardTableCleanup: public HeapRegionClosure { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4954 CardTableModRefBS* _ct_bs; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4955 public: |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4956 G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4957 : _ct_bs(ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4958 { } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4959 virtual bool doHeapRegion(HeapRegion* r) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4960 { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4961 MemRegion mr(r->bottom(), r->end()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4962 if (r->is_survivor()) { |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4963 _ct_bs->verify_dirty_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4964 } else { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4965 _ct_bs->verify_clean_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4966 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4967 return false; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4968 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4969 }; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4970 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4971 |
342 | 4972 void G1CollectedHeap::cleanUpCardTable() { |
4973 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
4974 double start = os::elapsedTime(); | |
4975 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4976 // Iterate over the dirty cards region list. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4977 G1ParCleanupCTTask cleanup_task(ct_bs, this, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4978 _young_list->first_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4979 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4980 if (ParallelGCThreads > 0) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4981 set_par_threads(workers()->total_workers()); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4982 workers()->run_task(&cleanup_task); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4983 set_par_threads(0); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4984 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4985 while (_dirty_cards_region_list) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4986 HeapRegion* r = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4987 cleanup_task.clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4988 _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4989 if (_dirty_cards_region_list == r) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4990 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4991 _dirty_cards_region_list = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4992 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4993 r->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4994 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4995 // now, redirty the cards of the survivor regions |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4996 // (it seemed faster to do it this way, instead of iterating over |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4997 // all regions and then clearing / dirtying as appropriate) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4998 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4999 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5000 |
342 | 5001 double elapsed = os::elapsedTime() - start; |
5002 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5003 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5004 if (G1VerifyCTCleanup || VerifyAfterGC) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5005 G1VerifyCardTableCleanup cleanup_verifier(ct_bs); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5006 heap_region_iterate(&cleanup_verifier); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5007 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5008 #endif |
342 | 5009 } |
5010 | |
5011 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
5012 double young_time_ms = 0.0; | |
5013 double non_young_time_ms = 0.0; | |
5014 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5015 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5016 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5017 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5018 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5019 |
342 | 5020 G1CollectorPolicy* policy = g1_policy(); |
5021 | |
5022 double start_sec = os::elapsedTime(); | |
5023 bool non_young = true; | |
5024 | |
5025 HeapRegion* cur = cs_head; | |
5026 int age_bound = -1; | |
5027 size_t rs_lengths = 0; | |
5028 | |
5029 while (cur != NULL) { | |
5030 if (non_young) { | |
5031 if (cur->is_young()) { | |
5032 double end_sec = os::elapsedTime(); | |
5033 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5034 non_young_time_ms += elapsed_ms; | |
5035 | |
5036 start_sec = os::elapsedTime(); | |
5037 non_young = false; | |
5038 } | |
5039 } else { | |
5040 if (!cur->is_on_free_list()) { | |
5041 double end_sec = os::elapsedTime(); | |
5042 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5043 young_time_ms += elapsed_ms; | |
5044 | |
5045 start_sec = os::elapsedTime(); | |
5046 non_young = true; | |
5047 } | |
5048 } | |
5049 | |
5050 rs_lengths += cur->rem_set()->occupied(); | |
5051 | |
5052 HeapRegion* next = cur->next_in_collection_set(); | |
5053 assert(cur->in_collection_set(), "bad CS"); | |
5054 cur->set_next_in_collection_set(NULL); | |
5055 cur->set_in_collection_set(false); | |
5056 | |
5057 if (cur->is_young()) { | |
5058 int index = cur->young_index_in_cset(); | |
5059 guarantee( index != -1, "invariant" ); | |
5060 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
5061 size_t words_survived = _surviving_young_words[index]; | |
5062 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5063 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5064 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5065 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5066 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5067 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5068 cur->set_next_young_region(NULL); |
342 | 5069 } else { |
5070 int index = cur->young_index_in_cset(); | |
5071 guarantee( index == -1, "invariant" ); | |
5072 } | |
5073 | |
5074 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
5075 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
5076 "invariant" ); | |
5077 | |
5078 if (!cur->evacuation_failed()) { | |
5079 // And the region is empty. | |
5080 assert(!cur->is_empty(), | |
5081 "Should not have empty regions in a CS."); | |
5082 free_region(cur); | |
5083 } else { | |
5084 cur->uninstall_surv_rate_group(); | |
5085 if (cur->is_young()) | |
5086 cur->set_young_index_in_cset(-1); | |
5087 cur->set_not_young(); | |
5088 cur->set_evacuation_failed(false); | |
5089 } | |
5090 cur = next; | |
5091 } | |
5092 | |
5093 policy->record_max_rs_lengths(rs_lengths); | |
5094 policy->cset_regions_freed(); | |
5095 | |
5096 double end_sec = os::elapsedTime(); | |
5097 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5098 if (non_young) | |
5099 non_young_time_ms += elapsed_ms; | |
5100 else | |
5101 young_time_ms += elapsed_ms; | |
5102 | |
5103 policy->record_young_free_cset_time_ms(young_time_ms); | |
5104 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
5105 } | |
5106 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5107 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5108 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5109 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5110 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5111 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5112 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5113 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5114 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5115 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5116 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5117 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5118 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5119 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5120 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5121 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5122 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5123 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5124 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5125 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5126 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5127 |
342 | 5128 HeapRegion* |
5129 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { | |
5130 assert(ZF_mon->owned_by_self(), "Precondition"); | |
5131 HeapRegion* res = pop_unclean_region_list_locked(); | |
5132 if (res != NULL) { | |
5133 assert(!res->continuesHumongous() && | |
5134 res->zero_fill_state() != HeapRegion::Allocated, | |
5135 "Only free regions on unclean list."); | |
5136 if (zero_filled) { | |
5137 res->ensure_zero_filled_locked(); | |
5138 res->set_zero_fill_allocated(); | |
5139 } | |
5140 } | |
5141 return res; | |
5142 } | |
5143 | |
// As alloc_region_from_unclean_list_locked(), but acquires ZF_mon
// itself (without a safepoint check).
HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
  MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
  return alloc_region_from_unclean_list_locked(zero_filled);
}
5148 | |
5149 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { | |
5150 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5151 put_region_on_unclean_list_locked(r); | |
5152 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
5153 } | |
5154 | |
// Record whether concurrent cleanup will still be feeding regions onto
// the unclean list (takes Cleanup_mon).
void G1CollectedHeap::set_unclean_regions_coming(bool b) {
  MutexLockerEx x(Cleanup_mon);
  set_unclean_regions_coming_locked(b);
}
5159 | |
5160 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { | |
5161 assert(Cleanup_mon->owned_by_self(), "Precondition"); | |
5162 _unclean_regions_coming = b; | |
5163 // Wake up mutator threads that might be waiting for completeCleanup to | |
5164 // finish. | |
5165 if (!b) Cleanup_mon->notify_all(); | |
5166 } | |
5167 | |
// Block the calling (non-GC) thread until concurrent cleanup has
// finished handing regions to the unclean list.
void G1CollectedHeap::wait_for_cleanup_complete() {
  assert_not_at_safepoint();
  MutexLockerEx x(Cleanup_mon);
  wait_for_cleanup_complete_locked();
}
5173 | |
void G1CollectedHeap::wait_for_cleanup_complete_locked() {
  assert(Cleanup_mon->owned_by_self(), "precondition");
  // Standard condition-wait loop: re-check the flag after every wakeup.
  while (_unclean_regions_coming) {
    Cleanup_mon->wait();
  }
}
5180 | |
5181 void | |
5182 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { | |
5183 assert(ZF_mon->owned_by_self(), "precondition."); | |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5184 #ifdef ASSERT |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5185 if (r->is_gc_alloc_region()) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5186 ResourceMark rm; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5187 stringStream region_str; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5188 print_on(®ion_str); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5189 assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s", |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5190 region_str.as_string())); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5191 } |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5192 #endif |
342 | 5193 _unclean_region_list.insert_before_head(r); |
5194 } | |
5195 | |
5196 void | |
5197 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { | |
5198 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5199 prepend_region_list_on_unclean_list_locked(list); | |
5200 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
5201 } | |
5202 | |
// Locked variant of the above; caller already holds ZF_mon.
void
G1CollectedHeap::
prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
  assert(ZF_mon->owned_by_self(), "precondition.");
  _unclean_region_list.prepend_list(list);
}
5209 | |
5210 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { | |
5211 assert(ZF_mon->owned_by_self(), "precondition."); | |
5212 HeapRegion* res = _unclean_region_list.pop(); | |
5213 if (res != NULL) { | |
5214 // Inform ZF thread that there's a new unclean head. | |
5215 if (_unclean_region_list.hd() != NULL && should_zf()) | |
5216 ZF_mon->notify_all(); | |
5217 } | |
5218 return res; | |
5219 } | |
5220 | |
// Head of the unclean list, without removing it. Caller holds ZF_mon.
HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  return _unclean_region_list.hd();
}
5225 | |
5226 | |
5227 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { | |
5228 assert(ZF_mon->owned_by_self(), "Precondition"); | |
5229 HeapRegion* r = peek_unclean_region_list_locked(); | |
5230 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { | |
5231 // Result of below must be equal to "r", since we hold the lock. | |
5232 (void)pop_unclean_region_list_locked(); | |
5233 put_free_region_on_list_locked(r); | |
5234 return true; | |
5235 } else { | |
5236 return false; | |
5237 } | |
5238 } | |
5239 | |
// As the locked variant above, but acquires ZF_mon itself.
bool G1CollectedHeap::move_cleaned_region_to_free_list() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  return move_cleaned_region_to_free_list_locked();
}
5244 | |
5245 | |
5246 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { | |
5247 assert(ZF_mon->owned_by_self(), "precondition."); | |
5248 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5249 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, | |
5250 "Regions on free list must be zero filled"); | |
5251 assert(!r->isHumongous(), "Must not be humongous."); | |
5252 assert(r->is_empty(), "Better be empty"); | |
5253 assert(!r->is_on_free_list(), | |
5254 "Better not already be on free list"); | |
5255 assert(!r->is_on_unclean_list(), | |
5256 "Better not already be on unclean list"); | |
5257 r->set_on_free_list(true); | |
5258 r->set_next_on_free_list(_free_region_list); | |
5259 _free_region_list = r; | |
5260 _free_region_list_size++; | |
5261 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5262 } | |
5263 | |
// As the locked variant above, but acquires ZF_mon itself.
void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  put_free_region_on_list_locked(r);
}
5268 | |
5269 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { | |
5270 assert(ZF_mon->owned_by_self(), "precondition."); | |
5271 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5272 HeapRegion* res = _free_region_list; | |
5273 if (res != NULL) { | |
5274 _free_region_list = res->next_from_free_list(); | |
5275 _free_region_list_size--; | |
5276 res->set_on_free_list(false); | |
5277 res->set_next_on_free_list(NULL); | |
5278 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5279 } | |
5280 return res; | |
5281 } | |
5282 | |
5283 | |
// Allocate a region from the free list or the unclean list. When
// "zero_filled" is true the free list (already-zeroed regions) is
// tried first; otherwise the unclean list is tried first. A second
// pass around the loop retries the remaining source before giving up
// and returning NULL.
HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
  // By self, or on behalf of self.
  assert(Heap_lock->is_locked(), "Precondition");
  HeapRegion* res = NULL;
  bool first = true;
  while (res == NULL) {
    if (zero_filled || !first) {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
      res = pop_free_region_list_locked();
      if (res != NULL) {
        assert(!res->zero_fill_is_allocated(),
               "No allocated regions on free list.");
        res->set_zero_fill_allocated();
      } else if (!first) {
        break;  // We tried both, time to return NULL.
      }
    }

    if (res == NULL) {
      res = alloc_region_from_unclean_list(zero_filled);
    }
    assert(res == NULL ||
           !zero_filled ||
           res->zero_fill_is_allocated(),
           "We must have allocated the region we're returning");
    first = false;
  }
  return res;
}
5313 | |
5314 void G1CollectedHeap::remove_allocated_regions_from_lists() { | |
5315 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5316 { | |
5317 HeapRegion* prev = NULL; | |
5318 HeapRegion* cur = _unclean_region_list.hd(); | |
5319 while (cur != NULL) { | |
5320 HeapRegion* next = cur->next_from_unclean_list(); | |
5321 if (cur->zero_fill_is_allocated()) { | |
5322 // Remove from the list. | |
5323 if (prev == NULL) { | |
5324 (void)_unclean_region_list.pop(); | |
5325 } else { | |
5326 _unclean_region_list.delete_after(prev); | |
5327 } | |
5328 cur->set_on_unclean_list(false); | |
5329 cur->set_next_on_unclean_list(NULL); | |
5330 } else { | |
5331 prev = cur; | |
5332 } | |
5333 cur = next; | |
5334 } | |
5335 assert(_unclean_region_list.sz() == unclean_region_list_length(), | |
5336 "Inv"); | |
5337 } | |
5338 | |
5339 { | |
5340 HeapRegion* prev = NULL; | |
5341 HeapRegion* cur = _free_region_list; | |
5342 while (cur != NULL) { | |
5343 HeapRegion* next = cur->next_from_free_list(); | |
5344 if (cur->zero_fill_is_allocated()) { | |
5345 // Remove from the list. | |
5346 if (prev == NULL) { | |
5347 _free_region_list = cur->next_from_free_list(); | |
5348 } else { | |
5349 prev->set_next_on_free_list(cur->next_from_free_list()); | |
5350 } | |
5351 cur->set_on_free_list(false); | |
5352 cur->set_next_on_free_list(NULL); | |
5353 _free_region_list_size--; | |
5354 } else { | |
5355 prev = cur; | |
5356 } | |
5357 cur = next; | |
5358 } | |
5359 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5360 } | |
5361 } | |
5362 | |
// Verify the invariants of both region lists (takes ZF_mon).
bool G1CollectedHeap::verify_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  return verify_region_lists_locked();
}
5367 | |
5368 bool G1CollectedHeap::verify_region_lists_locked() { | |
5369 HeapRegion* unclean = _unclean_region_list.hd(); | |
5370 while (unclean != NULL) { | |
5371 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); | |
5372 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); | |
5373 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, | |
5374 "Everything else is possible."); | |
5375 unclean = unclean->next_from_unclean_list(); | |
5376 } | |
5377 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); | |
5378 | |
5379 HeapRegion* free_r = _free_region_list; | |
5380 while (free_r != NULL) { | |
5381 assert(free_r->is_on_free_list(), "Well, it is!"); | |
5382 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); | |
5383 switch (free_r->zero_fill_state()) { | |
5384 case HeapRegion::NotZeroFilled: | |
5385 case HeapRegion::ZeroFilling: | |
5386 guarantee(false, "Should not be on free list."); | |
5387 break; | |
5388 default: | |
5389 // Everything else is possible. | |
5390 break; | |
5391 } | |
5392 free_r = free_r->next_from_free_list(); | |
5393 } | |
5394 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); | |
5395 // If we didn't do an assertion... | |
5396 return true; | |
5397 } | |
5398 | |
5399 size_t G1CollectedHeap::free_region_list_length() { | |
5400 assert(ZF_mon->owned_by_self(), "precondition."); | |
5401 size_t len = 0; | |
5402 HeapRegion* cur = _free_region_list; | |
5403 while (cur != NULL) { | |
5404 len++; | |
5405 cur = cur->next_from_free_list(); | |
5406 } | |
5407 return len; | |
5408 } | |
5409 | |
// Length of the unclean list. Caller holds ZF_mon.
size_t G1CollectedHeap::unclean_region_list_length() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  return _unclean_region_list.length();
}
5414 | |
// Number of regions currently in the heap region sequence.
size_t G1CollectedHeap::n_regions() {
  return _hrs->length();
}
5418 | |
// Maximum possible number of regions, derived from the reserved heap
// size rounded up to a whole number of regions.
size_t G1CollectedHeap::max_regions() {
  return
    (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
    HeapRegion::GrainBytes;
}
5424 | |
// Cached count of free regions.
size_t G1CollectedHeap::free_regions() {
  /* Possibly-expensive assert.
  assert(_free_regions == count_free_regions(),
         "_free_regions is off.");
  */
  return _free_regions;
}
5432 | |
// The zero-fill thread should run while the free list is shorter than
// G1ConcZFMaxRegions.
bool G1CollectedHeap::should_zf() {
  return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
}
5436 | |
5437 class RegionCounter: public HeapRegionClosure { | |
5438 size_t _n; | |
5439 public: | |
5440 RegionCounter() : _n(0) {} | |
5441 bool doHeapRegion(HeapRegion* r) { | |
677 | 5442 if (r->is_empty()) { |
342 | 5443 assert(!r->isHumongous(), "H regions should not be empty."); |
5444 _n++; | |
5445 } | |
5446 return false; | |
5447 } | |
5448 int res() { return (int) _n; } | |
5449 }; | |
5450 | |
5451 size_t G1CollectedHeap::count_free_regions() { | |
5452 RegionCounter rc; | |
5453 heap_region_iterate(&rc); | |
5454 size_t n = rc.res(); | |
5455 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) | |
5456 n--; | |
5457 return n; | |
5458 } | |
5459 | |
5460 size_t G1CollectedHeap::count_free_regions_list() { | |
5461 size_t n = 0; | |
5462 size_t o = 0; | |
5463 ZF_mon->lock_without_safepoint_check(); | |
5464 HeapRegion* cur = _free_region_list; | |
5465 while (cur != NULL) { | |
5466 cur = cur->next_from_free_list(); | |
5467 n++; | |
5468 } | |
5469 size_t m = unclean_region_list_length(); | |
5470 ZF_mon->unlock(); | |
5471 return n + m; | |
5472 } | |
5473 | |
// Tag 'hr' as a short-lived (eden) region: push it on the young list
// and inform the collector policy.
void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  _young_list->push_region(hr);
  g1_policy()->set_region_short_lived(hr);
}
5480 | |
5481 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5482 private: | |
5483 bool _success; | |
5484 public: | |
5485 NoYoungRegionsClosure() : _success(true) { } | |
5486 bool doHeapRegion(HeapRegion* r) { | |
5487 if (r->is_young()) { | |
5488 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5489 r->bottom(), r->end()); | |
5490 _success = false; | |
5491 } | |
5492 return false; | |
5493 } | |
5494 bool success() { return _success; } | |
5495 }; | |
5496 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5497 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5498 bool ret = _young_list->check_list_empty(check_sample); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5499 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5500 if (check_heap) { |
342 | 5501 NoYoungRegionsClosure closure; |
5502 heap_region_iterate(&closure); | |
5503 ret = ret && closure.success(); | |
5504 } | |
5505 | |
5506 return ret; | |
5507 } | |
5508 | |
// Discard the contents of the young list.
void G1CollectedHeap::empty_young_list() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");

  _young_list->empty_list();
}
5516 | |
5517 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5518 bool no_allocs = true; | |
5519 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5520 HeapRegion* r = _gc_alloc_regions[ap]; | |
5521 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5522 } | |
5523 return no_allocs; | |
5524 } | |
5525 | |
545 | 5526 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5527 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5528 HeapRegion* r = _gc_alloc_regions[ap]; | |
5529 if (r != NULL) { | |
5530 // Check for aliases. | |
5531 bool has_processed_alias = false; | |
5532 for (int i = 0; i < ap; ++i) { | |
5533 if (_gc_alloc_regions[i] == r) { | |
5534 has_processed_alias = true; | |
5535 break; | |
5536 } | |
5537 } | |
5538 if (!has_processed_alias) { | |
545 | 5539 retire_alloc_region(r, false /* par */); |
342 | 5540 } |
5541 } | |
5542 } | |
5543 } | |
5544 | |
5545 | |
5546 // Done at the start of full GC. | |
5547 void G1CollectedHeap::tear_down_region_lists() { | |
5548 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5549 while (pop_unclean_region_list_locked() != NULL) ; | |
5550 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, | |
1489
cff162798819
6888953: some calls to function-like macros are missing semicolons
jcoomes
parents:
1394
diff
changeset
|
5551 "Postconditions of loop."); |
342 | 5552 while (pop_free_region_list_locked() != NULL) ; |
5553 assert(_free_region_list == NULL, "Postcondition of loop."); | |
5554 if (_free_region_list_size != 0) { | |
5555 gclog_or_tty->print_cr("Size is %d.", _free_region_list_size); | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
5556 print_on(gclog_or_tty, true /* extended */); |
342 | 5557 } |
5558 assert(_free_region_list_size == 0, "Postconditions of loop."); | |
5559 } | |
5560 | |
5561 | |
// Closure run after a full GC: a non-empty region gets its unused tail
// zero-filled and is marked as allocated; an empty region is routed
// back onto the unclean or free list according to its zero-fill state.
// Also counts the empty (free) regions encountered.
class RegionResetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;  // number of empty (free) regions seen
public:
  RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      // Used region: zero the tail between top() and end().
      if (r->top() < r->end()) {
        Copy::fill_to_words(r->top(),
                          pointer_delta(r->end(), r->top()));
      }
      r->set_zero_fill_allocated();
    } else {
      assert(r->is_empty(), "tautology");
      _n++;
      switch (r->zero_fill_state()) {
        case HeapRegion::NotZeroFilled:
        case HeapRegion::ZeroFilling:
          _g1->put_region_on_unclean_list_locked(r);
          break;
        case HeapRegion::Allocated:
          r->set_zero_fill_complete();
          // no break; go on to put on free list.
        case HeapRegion::ZeroFilled:
          _g1->put_free_region_on_list_locked(r);
          break;
      }
    }
    return false;
  }

  int getFreeRegionCount() {return _n;}
};
5596 | |
5597 // Done at the end of full GC. | |
5598 void G1CollectedHeap::rebuild_region_lists() { | |
5599 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5600 // This needs to go at the end of the full GC. | |
5601 RegionResetter rs; | |
5602 heap_region_iterate(&rs); | |
5603 _free_regions = rs.getFreeRegionCount(); | |
5604 // Tell the ZF thread it may have work to do. | |
5605 if (should_zf()) ZF_mon->notify_all(); | |
5606 } | |
5607 | |
// Heap region closure applied at the start of a full GC: flags every region
// holding data (top() > bottom()) as needing re-zero-filling.
// NOTE(review): _n is never incremented here — apparently kept only for
// symmetry with RegionResetter; confirm before removing.
class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;
public:
  UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      // There are assertions in "set_zero_fill_needed()" below that
      // require top() == bottom(), so this is technically illegal.
      // We'll skirt the law here, by making that true temporarily.
      // (Debug builds only: DEBUG_ONLY compiles to nothing in product.)
      DEBUG_ONLY(HeapWord* save_top = r->top();
                 r->set_top(r->bottom()));
      r->set_zero_fill_needed();
      DEBUG_ONLY(r->set_top(save_top));
    }
    return false;  // keep iterating
  }
};
5627 | |
// Done at the start of full GC.
void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // NOTE(review): the original comment here read "This needs to go at the
  // end of the full GC", contradicting the header above — it looks like a
  // copy-paste from rebuild_region_lists(). This runs at the *start*.
  UsedRegionsNeedZeroFillSetter rs;
  heap_region_iterate(&rs);
}
5635 | |
// Forwards the concurrency flag to the card-table-entry refinement closure.
void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  _refine_cte_cl->set_concurrent(concurrent);
}
5639 | |
5640 #ifndef PRODUCT | |
5641 | |
5642 class PrintHeapRegionClosure: public HeapRegionClosure { | |
5643 public: | |
5644 bool doHeapRegion(HeapRegion *r) { | |
5645 gclog_or_tty->print("Region: "PTR_FORMAT":", r); | |
5646 if (r != NULL) { | |
5647 if (r->is_on_free_list()) | |
5648 gclog_or_tty->print("Free "); | |
5649 if (r->is_young()) | |
5650 gclog_or_tty->print("Young "); | |
5651 if (r->isHumongous()) | |
5652 gclog_or_tty->print("Is Humongous "); | |
5653 r->print(); | |
5654 } | |
5655 return false; | |
5656 } | |
5657 }; | |
5658 | |
5659 class SortHeapRegionClosure : public HeapRegionClosure { | |
5660 size_t young_regions,free_regions, unclean_regions; | |
5661 size_t hum_regions, count; | |
5662 size_t unaccounted, cur_unclean, cur_alloc; | |
5663 size_t total_free; | |
5664 HeapRegion* cur; | |
5665 public: | |
5666 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), | |
5667 free_regions(0), unclean_regions(0), | |
5668 hum_regions(0), | |
5669 count(0), unaccounted(0), | |
5670 cur_alloc(0), total_free(0) | |
5671 {} | |
5672 bool doHeapRegion(HeapRegion *r) { | |
5673 count++; | |
5674 if (r->is_on_free_list()) free_regions++; | |
5675 else if (r->is_on_unclean_list()) unclean_regions++; | |
5676 else if (r->isHumongous()) hum_regions++; | |
5677 else if (r->is_young()) young_regions++; | |
5678 else if (r == cur) cur_alloc++; | |
5679 else unaccounted++; | |
5680 return false; | |
5681 } | |
5682 void print() { | |
5683 total_free = free_regions + unclean_regions; | |
5684 gclog_or_tty->print("%d regions\n", count); | |
5685 gclog_or_tty->print("%d free: free_list = %d unclean = %d\n", | |
5686 total_free, free_regions, unclean_regions); | |
5687 gclog_or_tty->print("%d humongous %d young\n", | |
5688 hum_regions, young_regions); | |
5689 gclog_or_tty->print("%d cur_alloc\n", cur_alloc); | |
5690 gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted); | |
5691 } | |
5692 }; | |
5693 | |
5694 void G1CollectedHeap::print_region_counts() { | |
5695 SortHeapRegionClosure sc(_cur_alloc_region); | |
5696 PrintHeapRegionClosure cl; | |
5697 heap_region_iterate(&cl); | |
5698 heap_region_iterate(&sc); | |
5699 sc.print(); | |
5700 print_region_accounting_info(); | |
5701 }; | |
5702 | |
// Debug-only sanity hook: intended to verify region accounting, but the
// check is not yet implemented, so it unconditionally reports success.
bool G1CollectedHeap::regions_accounted_for() {
  // TODO: regions accounting for young/survivor/tenured
  return true;
}
5707 | |
// Debug-only: print the free/unclean region counts from several independent
// sources so that any disagreement between them is visible in the log.
bool G1CollectedHeap::print_region_accounting_info() {
  // NOTE(review): if these expressions are size_t (as _unclean_region_list.sz()
  // appears to be), "%d" truncates on LP64 — confirm the types and consider
  // SIZE_FORMAT, as done elsewhere in HotSpot.
  gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
                         free_regions(),
                         count_free_regions(), count_free_regions_list(),
                         _free_region_list_size, _unclean_region_list.sz());
  gclog_or_tty->print_cr("cur_alloc: %d.",
                         (_cur_alloc_region == NULL ? 0 : 1));
  gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);

  // TODO: check regions accounting for young/survivor/tenured
  return true;
}
5720 | |
5721 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5722 HeapRegion* hr = heap_region_containing(p); | |
5723 if (hr == NULL) { | |
5724 return is_in_permanent(p); | |
5725 } else { | |
5726 return hr->is_in(p); | |
5727 } | |
5728 } | |
941 | 5729 #endif // !PRODUCT |
342 | 5730 |
// Placeholder for unimplemented G1 entry points. The Unimplemented() call
// is commented out, so invoking this is currently a silent no-op.
void G1CollectedHeap::g1_unimplemented() {
  // Unimplemented();
}