Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 2030:fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
Summary: The concurrent marking thread can complete its operation and increment the full GC counter during a Full GC. This causes the nesting of increments at the start and end of Full GCs to differ from what we expect. The fix is for the marking thread to join the suspendible thread set before incrementing the counter so that it is blocked until the Full GC (or any other safepoint) is finished. The change also includes some minor code cleanup (I renamed a parameter).
Reviewed-by: brutisso, ysr
author | tonyp |
---|---|
date | Tue, 14 Dec 2010 16:19:44 -0500 |
parents | 8df09fb45352 |
children | b03260081e9b |
rev | line source |
---|---|
342 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "code/icBuffer.hpp" | |
27 #include "gc_implementation/g1/bufferingOopClosure.hpp" | |
28 #include "gc_implementation/g1/concurrentG1Refine.hpp" | |
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" | |
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" | |
31 #include "gc_implementation/g1/concurrentZFThread.hpp" | |
32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" | |
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | |
34 #include "gc_implementation/g1/g1MarkSweep.hpp" | |
35 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | |
36 #include "gc_implementation/g1/g1RemSet.inline.hpp" | |
37 #include "gc_implementation/g1/heapRegionRemSet.hpp" | |
38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | |
39 #include "gc_implementation/g1/vm_operations_g1.hpp" | |
40 #include "gc_implementation/shared/isGCActiveMark.hpp" | |
41 #include "memory/gcLocker.inline.hpp" | |
42 #include "memory/genOopClosures.inline.hpp" | |
43 #include "memory/generationSpec.hpp" | |
44 #include "oops/oop.inline.hpp" | |
45 #include "oops/oop.pcgc.inline.hpp" | |
46 #include "runtime/aprofiler.hpp" | |
47 #include "runtime/vmThread.hpp" | |
342 | 48 |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
49 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
50 |
342 | 51 // turn it on so that the contents of the young list (scan-only / |
52 // to-be-collected) are printed at "strategic" points before / during | |
53 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
54 #define YOUNG_LIST_VERBOSE 0 |
342 | 55 // CURRENT STATUS |
56 // This file is under construction. Search for "FIXME". | |
57 | |
58 // INVARIANTS/NOTES | |
59 // | |
60 // All allocation activity covered by the G1CollectedHeap interface is | |
1973 | 61 // serialized by acquiring the HeapLock. This happens in mem_allocate |
62 // and allocate_new_tlab, which are the "entry" points to the | |
63 // allocation code from the rest of the JVM. (Note that this does not | |
64 // apply to TLAB allocation, which is not part of this interface: it | |
65 // is done by clients of this interface.) | |
342 | 66 |
67 // Local to this file. | |
68 | |
69 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
70 SuspendibleThreadSet* _sts; | |
71 G1RemSet* _g1rs; | |
72 ConcurrentG1Refine* _cg1r; | |
73 bool _concurrent; | |
74 public: | |
75 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
76 G1RemSet* g1rs, | |
77 ConcurrentG1Refine* cg1r) : | |
78 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
79 {} | |
80 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
1705 | 81 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); |
82 // This path is executed by the concurrent refine or mutator threads, | |
83 // concurrently, and so we do not care if card_ptr contains references | |
84 // that point into the collection set. | |
85 assert(!oops_into_cset, "should be"); | |
86 | |
342 | 87 if (_concurrent && _sts->should_yield()) { |
88 // Caller will actually yield. | |
89 return false; | |
90 } | |
91 // Otherwise, we finished successfully; return true. | |
92 return true; | |
93 } | |
94 void set_concurrent(bool b) { _concurrent = b; } | |
95 }; | |
96 | |
97 | |
98 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
99 int _calls; | |
100 G1CollectedHeap* _g1h; | |
101 CardTableModRefBS* _ctbs; | |
102 int _histo[256]; | |
103 public: | |
104 ClearLoggedCardTableEntryClosure() : | |
105 _calls(0) | |
106 { | |
107 _g1h = G1CollectedHeap::heap(); | |
108 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
109 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
110 } | |
111 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
112 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
113 _calls++; | |
114 unsigned char* ujb = (unsigned char*)card_ptr; | |
115 int ind = (int)(*ujb); | |
116 _histo[ind]++; | |
117 *card_ptr = -1; | |
118 } | |
119 return true; | |
120 } | |
121 int calls() { return _calls; } | |
122 void print_histo() { | |
123 gclog_or_tty->print_cr("Card table value histogram:"); | |
124 for (int i = 0; i < 256; i++) { | |
125 if (_histo[i] != 0) { | |
126 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
127 } | |
128 } | |
129 } | |
130 }; | |
131 | |
132 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
133 int _calls; | |
134 G1CollectedHeap* _g1h; | |
135 CardTableModRefBS* _ctbs; | |
136 public: | |
137 RedirtyLoggedCardTableEntryClosure() : | |
138 _calls(0) | |
139 { | |
140 _g1h = G1CollectedHeap::heap(); | |
141 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
142 } | |
143 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
144 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
145 _calls++; | |
146 *card_ptr = 0; | |
147 } | |
148 return true; | |
149 } | |
150 int calls() { return _calls; } | |
151 }; | |
152 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
153 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
154 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
155 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
156 *card_ptr = CardTableModRefBS::dirty_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
157 return true; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
158 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
159 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
160 |
342 | 161 YoungList::YoungList(G1CollectedHeap* g1h) |
162 : _g1h(g1h), _head(NULL), | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
163 _length(0), |
342 | 164 _last_sampled_rs_lengths(0), |
545 | 165 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
342 | 166 { |
167 guarantee( check_list_empty(false), "just making sure..." ); | |
168 } | |
169 | |
170 void YoungList::push_region(HeapRegion *hr) { | |
171 assert(!hr->is_young(), "should not already be young"); | |
172 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
173 | |
174 hr->set_next_young_region(_head); | |
175 _head = hr; | |
176 | |
177 hr->set_young(); | |
178 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
179 ++_length; | |
180 } | |
181 | |
182 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 183 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 184 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
185 | |
186 hr->set_next_young_region(_survivor_head); | |
187 if (_survivor_head == NULL) { | |
545 | 188 _survivor_tail = hr; |
342 | 189 } |
190 _survivor_head = hr; | |
191 | |
192 ++_survivor_length; | |
193 } | |
194 | |
195 void YoungList::empty_list(HeapRegion* list) { | |
196 while (list != NULL) { | |
197 HeapRegion* next = list->get_next_young_region(); | |
198 list->set_next_young_region(NULL); | |
199 list->uninstall_surv_rate_group(); | |
200 list->set_not_young(); | |
201 list = next; | |
202 } | |
203 } | |
204 | |
205 void YoungList::empty_list() { | |
206 assert(check_list_well_formed(), "young list should be well formed"); | |
207 | |
208 empty_list(_head); | |
209 _head = NULL; | |
210 _length = 0; | |
211 | |
212 empty_list(_survivor_head); | |
213 _survivor_head = NULL; | |
545 | 214 _survivor_tail = NULL; |
342 | 215 _survivor_length = 0; |
216 | |
217 _last_sampled_rs_lengths = 0; | |
218 | |
219 assert(check_list_empty(false), "just making sure..."); | |
220 } | |
221 | |
222 bool YoungList::check_list_well_formed() { | |
223 bool ret = true; | |
224 | |
225 size_t length = 0; | |
226 HeapRegion* curr = _head; | |
227 HeapRegion* last = NULL; | |
228 while (curr != NULL) { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
229 if (!curr->is_young()) { |
342 | 230 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
231 "incorrectly tagged (y: %d, surv: %d)", |
342 | 232 curr->bottom(), curr->end(), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
233 curr->is_young(), curr->is_survivor()); |
342 | 234 ret = false; |
235 } | |
236 ++length; | |
237 last = curr; | |
238 curr = curr->get_next_young_region(); | |
239 } | |
240 ret = ret && (length == _length); | |
241 | |
242 if (!ret) { | |
243 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
244 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
245 length, _length); | |
246 } | |
247 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
248 return ret; |
342 | 249 } |
250 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
251 bool YoungList::check_list_empty(bool check_sample) { |
342 | 252 bool ret = true; |
253 | |
254 if (_length != 0) { | |
255 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
256 _length); | |
257 ret = false; | |
258 } | |
259 if (check_sample && _last_sampled_rs_lengths != 0) { | |
260 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
261 ret = false; | |
262 } | |
263 if (_head != NULL) { | |
264 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
265 ret = false; | |
266 } | |
267 if (!ret) { | |
268 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
269 } | |
270 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
271 return ret; |
342 | 272 } |
273 | |
274 void | |
275 YoungList::rs_length_sampling_init() { | |
276 _sampled_rs_lengths = 0; | |
277 _curr = _head; | |
278 } | |
279 | |
280 bool | |
281 YoungList::rs_length_sampling_more() { | |
282 return _curr != NULL; | |
283 } | |
284 | |
285 void | |
286 YoungList::rs_length_sampling_next() { | |
287 assert( _curr != NULL, "invariant" ); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
288 size_t rs_length = _curr->rem_set()->occupied(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
289 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
290 _sampled_rs_lengths += rs_length; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
291 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
292 // The current region may not yet have been added to the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
293 // incremental collection set (it gets added when it is |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
294 // retired as the current allocation region). |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
295 if (_curr->in_collection_set()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
296 // Update the collection set policy information for this region |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
297 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
298 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
299 |
342 | 300 _curr = _curr->get_next_young_region(); |
301 if (_curr == NULL) { | |
302 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
303 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
304 } | |
305 } | |
306 | |
307 void | |
308 YoungList::reset_auxilary_lists() { | |
309 guarantee( is_empty(), "young list should be empty" ); | |
310 assert(check_list_well_formed(), "young list should be well formed"); | |
311 | |
312 // Add survivor regions to SurvRateGroup. | |
313 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
545 | 314 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
315 |
342 | 316 for (HeapRegion* curr = _survivor_head; |
317 curr != NULL; | |
318 curr = curr->get_next_young_region()) { | |
319 _g1h->g1_policy()->set_region_survivors(curr); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
320 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
321 // The region is a non-empty survivor so let's add it to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
322 // the incremental collection set for the next evacuation |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
323 // pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
324 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); |
342 | 325 } |
326 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
327 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
328 _head = _survivor_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
329 _length = _survivor_length; |
342 | 330 if (_survivor_head != NULL) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
331 assert(_survivor_tail != NULL, "cause it shouldn't be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
332 assert(_survivor_length > 0, "invariant"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
333 _survivor_tail->set_next_young_region(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
334 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
335 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
336 // Don't clear the survivor list handles until the start of |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
337 // the next evacuation pause - we need it in order to re-tag |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
338 // the survivor regions from this evacuation pause as 'young' |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
339 // at the start of the next. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
340 |
545 | 341 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
342 | 342 |
343 assert(check_list_well_formed(), "young list should be well formed"); | |
344 } | |
345 | |
346 void YoungList::print() { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
347 HeapRegion* lists[] = {_head, _survivor_head}; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
348 const char* names[] = {"YOUNG", "SURVIVOR"}; |
342 | 349 |
350 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
351 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
352 HeapRegion *curr = lists[list]; | |
353 if (curr == NULL) | |
354 gclog_or_tty->print_cr(" empty"); | |
355 while (curr != NULL) { | |
356 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
357 "age: %4d, y: %d, surv: %d", |
342 | 358 curr->bottom(), curr->end(), |
359 curr->top(), | |
360 curr->prev_top_at_mark_start(), | |
361 curr->next_top_at_mark_start(), | |
362 curr->top_at_conc_mark_count(), | |
363 curr->age_in_surv_rate_group_cond(), | |
364 curr->is_young(), | |
365 curr->is_survivor()); | |
366 curr = curr->get_next_young_region(); | |
367 } | |
368 } | |
369 | |
370 gclog_or_tty->print_cr(""); | |
371 } | |
372 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
373 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
374 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
375 // Claim the right to put the region on the dirty cards region list |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
376 // by installing a self pointer. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
377 HeapRegion* next = hr->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
378 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
379 HeapRegion* res = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
380 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
381 NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
382 if (res == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
383 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
384 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
385 // Put the region to the dirty cards region list. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
386 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
387 next = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
388 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
389 if (next == head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
390 assert(hr->get_next_dirty_cards_region() == hr, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
391 "hr->get_next_dirty_cards_region() != hr"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
392 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
393 // The last region in the list points to itself. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
394 hr->set_next_dirty_cards_region(hr); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
395 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
396 hr->set_next_dirty_cards_region(next); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
397 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
398 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
399 } while (next != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
400 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
401 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
402 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
403 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
404 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
405 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
406 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
407 HeapRegion* hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
408 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
409 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
410 if (head == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
411 return NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
412 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
413 HeapRegion* new_head = head->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
414 if (head == new_head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
415 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
416 new_head = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
417 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
418 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
419 head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
420 } while (hr != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
421 assert(hr != NULL, "invariant"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
422 hr->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
423 return hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
424 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
425 |
342 | 426 void G1CollectedHeap::stop_conc_gc_threads() { |
794 | 427 _cg1r->stop(); |
342 | 428 _czft->stop(); |
429 _cmThread->stop(); | |
430 } | |
431 | |
432 | |
433 void G1CollectedHeap::check_ct_logs_at_safepoint() { | |
434 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
435 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
436 | |
437 // Count the dirty cards at the start. | |
438 CountNonCleanMemRegionClosure count1(this); | |
439 ct_bs->mod_card_iterate(&count1); | |
440 int orig_count = count1.n(); | |
441 | |
442 // First clear the logged cards. | |
443 ClearLoggedCardTableEntryClosure clear; | |
444 dcqs.set_closure(&clear); | |
445 dcqs.apply_closure_to_all_completed_buffers(); | |
446 dcqs.iterate_closure_all_threads(false); | |
447 clear.print_histo(); | |
448 | |
449 // Now ensure that there's no dirty cards. | |
450 CountNonCleanMemRegionClosure count2(this); | |
451 ct_bs->mod_card_iterate(&count2); | |
452 if (count2.n() != 0) { | |
453 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
454 count2.n(), orig_count); | |
455 } | |
456 guarantee(count2.n() == 0, "Card table should be clean."); | |
457 | |
458 RedirtyLoggedCardTableEntryClosure redirty; | |
459 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
460 dcqs.apply_closure_to_all_completed_buffers(); | |
461 dcqs.iterate_closure_all_threads(false); | |
462 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
463 clear.calls(), orig_count); | |
464 guarantee(redirty.calls() == clear.calls(), | |
465 "Or else mechanism is broken."); | |
466 | |
467 CountNonCleanMemRegionClosure count3(this); | |
468 ct_bs->mod_card_iterate(&count3); | |
469 if (count3.n() != orig_count) { | |
470 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
471 orig_count, count3.n()); | |
472 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
473 } | |
474 | |
475 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
476 } | |
477 | |
// Private class members.

// The single G1CollectedHeap instance. NOTE(review): presumably assigned
// during heap initialization -- the assignment is not visible in this chunk.
G1CollectedHeap* G1CollectedHeap::_g1h;

// Private methods.

484 // Finds a HeapRegion that can be used to allocate a given size of block. | |
485 | |
486 | |
487 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, | |
488 bool do_expand, | |
489 bool zero_filled) { | |
490 ConcurrentZFThread::note_region_alloc(); | |
491 HeapRegion* res = alloc_free_region_from_lists(zero_filled); | |
492 if (res == NULL && do_expand) { | |
493 expand(word_size * HeapWordSize); | |
494 res = alloc_free_region_from_lists(zero_filled); | |
495 assert(res == NULL || | |
496 (!res->isHumongous() && | |
497 (!zero_filled || | |
498 res->zero_fill_state() == HeapRegion::Allocated)), | |
499 "Alloc Regions must be zero filled (and non-H)"); | |
500 } | |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
501 if (res != NULL) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
502 if (res->is_empty()) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
503 _free_regions--; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
504 } |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
505 assert(!res->isHumongous() && |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
506 (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated), |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
507 err_msg("Non-young alloc Regions must be zero filled (and non-H):" |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
508 " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d", |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
509 res->isHumongous(), zero_filled, res->zero_fill_state())); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
510 assert(!res->is_on_unclean_list(), |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
511 "Alloc Regions must not be on the unclean list"); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
512 if (G1PrintHeapRegions) { |
342 | 513 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " |
514 "top "PTR_FORMAT, | |
515 res->hrs_index(), res->bottom(), res->end(), res->top()); | |
516 } | |
517 } | |
518 return res; | |
519 } | |
520 | |
521 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, | |
522 size_t word_size, | |
523 bool zero_filled) { | |
524 HeapRegion* alloc_region = NULL; | |
525 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
526 alloc_region = newAllocRegion_work(word_size, true, zero_filled); | |
527 if (purpose == GCAllocForSurvived && alloc_region != NULL) { | |
545 | 528 alloc_region->set_survivor(); |
342 | 529 } |
530 ++_gc_alloc_region_counts[purpose]; | |
531 } else { | |
532 g1_policy()->note_alloc_region_limit_reached(purpose); | |
533 } | |
534 return alloc_region; | |
535 } | |
536 | |
537 // If could fit into free regions w/o expansion, try. | |
538 // Otherwise, if can expand, do so. | |
539 // Otherwise, if using ex regions might help, try with ex given back. | |
1973 | 540 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { |
541 assert_heap_locked_or_at_safepoint(); | |
342 | 542 assert(regions_accounted_for(), "Region leakage!"); |
543 | |
1973 | 544 // We can't allocate humongous regions while cleanupComplete is |
545 // running, since some of the regions we find to be empty might not | |
546 // yet be added to the unclean list. If we're already at a | |
547 // safepoint, this call is unnecessary, not to mention wrong. | |
548 if (!SafepointSynchronize::is_at_safepoint()) { | |
342 | 549 wait_for_cleanup_complete(); |
1973 | 550 } |
342 | 551 |
552 size_t num_regions = | |
1973 | 553 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; |
342 | 554 |
555 // Special case if < one region??? | |
556 | |
557 // Remember the ft size. | |
558 size_t x_size = expansion_regions(); | |
559 | |
560 HeapWord* res = NULL; | |
561 bool eliminated_allocated_from_lists = false; | |
562 | |
563 // Can the allocation potentially fit in the free regions? | |
564 if (free_regions() >= num_regions) { | |
565 res = _hrs->obj_allocate(word_size); | |
566 } | |
567 if (res == NULL) { | |
568 // Try expansion. | |
569 size_t fs = _hrs->free_suffix(); | |
570 if (fs + x_size >= num_regions) { | |
571 expand((num_regions - fs) * HeapRegion::GrainBytes); | |
572 res = _hrs->obj_allocate(word_size); | |
573 assert(res != NULL, "This should have worked."); | |
574 } else { | |
575 // Expansion won't help. Are there enough free regions if we get rid | |
576 // of reservations? | |
577 size_t avail = free_regions(); | |
578 if (avail >= num_regions) { | |
579 res = _hrs->obj_allocate(word_size); | |
580 if (res != NULL) { | |
581 remove_allocated_regions_from_lists(); | |
582 eliminated_allocated_from_lists = true; | |
583 } | |
584 } | |
585 } | |
586 } | |
587 if (res != NULL) { | |
588 // Increment by the number of regions allocated. | |
589 // FIXME: Assumes regions all of size GrainBytes. | |
590 #ifndef PRODUCT | |
591 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * | |
592 HeapRegion::GrainWords)); | |
593 #endif | |
594 if (!eliminated_allocated_from_lists) | |
595 remove_allocated_regions_from_lists(); | |
596 _summary_bytes_used += word_size * HeapWordSize; | |
597 _free_regions -= num_regions; | |
598 _num_humongous_regions += (int) num_regions; | |
599 } | |
600 assert(regions_accounted_for(), "Region Leakage"); | |
601 return res; | |
602 } | |
603 | |
1973 | 604 void |
605 G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) { | |
606 // The cleanup operation might update _summary_bytes_used | |
607 // concurrently with this method. So, right now, if we don't wait | |
608 // for it to complete, updates to _summary_bytes_used might get | |
609 // lost. This will be resolved in the near future when the operation | |
610 // of the free region list is revamped as part of CR 6977804. | |
611 wait_for_cleanup_complete(); | |
612 | |
613 retire_cur_alloc_region_common(cur_alloc_region); | |
614 assert(_cur_alloc_region == NULL, "post-condition"); | |
615 } | |
616 | |
617 // See the comment in the .hpp file about the locking protocol and | |
618 // assumptions of this method (and other related ones). | |
342 | 619 HeapWord* |
1973 | 620 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size, |
621 bool at_safepoint, | |
1991
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
622 bool do_dirtying, |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
623 bool can_expand) { |
1973 | 624 assert_heap_locked_or_at_safepoint(); |
625 assert(_cur_alloc_region == NULL, | |
626 "replace_cur_alloc_region_and_allocate() should only be called " | |
627 "after retiring the previous current alloc region"); | |
628 assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, | |
629 "at_safepoint and is_at_safepoint() should be a tautology"); | |
1991
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
630 assert(!can_expand || g1_policy()->can_expand_young_list(), |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
631 "we should not call this method with can_expand == true if " |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
632 "we are not allowed to expand the young gen"); |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
633 |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
634 if (can_expand || !g1_policy()->is_young_list_full()) { |
1973 | 635 if (!at_safepoint) { |
636 // The cleanup operation might update _summary_bytes_used | |
637 // concurrently with this method. So, right now, if we don't | |
638 // wait for it to complete, updates to _summary_bytes_used might | |
639 // get lost. This will be resolved in the near future when the | |
640 // operation of the free region list is revamped as part of | |
641 // CR 6977804. If we're already at a safepoint, this call is | |
642 // unnecessary, not to mention wrong. | |
354
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
643 wait_for_cleanup_complete(); |
342 | 644 } |
1973 | 645 |
646 HeapRegion* new_cur_alloc_region = newAllocRegion(word_size, | |
647 false /* zero_filled */); | |
648 if (new_cur_alloc_region != NULL) { | |
649 assert(new_cur_alloc_region->is_empty(), | |
650 "the newly-allocated region should be empty, " | |
651 "as right now we only allocate new regions out of the free list"); | |
652 g1_policy()->update_region_num(true /* next_is_young */); | |
653 _summary_bytes_used -= new_cur_alloc_region->used(); | |
654 set_region_short_lived_locked(new_cur_alloc_region); | |
655 | |
656 assert(!new_cur_alloc_region->isHumongous(), | |
657 "Catch a regression of this bug."); | |
658 | |
659 // We need to ensure that the stores to _cur_alloc_region and, | |
660 // subsequently, to top do not float above the setting of the | |
661 // young type. | |
662 OrderAccess::storestore(); | |
663 | |
664 // Now allocate out of the new current alloc region. We could | |
665 // have re-used allocate_from_cur_alloc_region() but its | |
666 // operation is slightly different to what we need here. First, | |
667 // allocate_from_cur_alloc_region() is only called outside a | |
668 // safepoint and will always unlock the Heap_lock if it returns | |
669 // a non-NULL result. Second, it assumes that the current alloc | |
670 // region is what's already assigned in _cur_alloc_region. What | |
671 // we want here is to actually do the allocation first before we | |
672 // assign the new region to _cur_alloc_region. This ordering is | |
673 // not currently important, but it will be essential when we | |
674 // change the code to support CAS allocation in the future (see | |
675 // CR 6994297). | |
676 // | |
677 // This allocate method does BOT updates and we don't need them in | |
678 // the young generation. This will be fixed in the near future by | |
679 // CR 6994297. | |
680 HeapWord* result = new_cur_alloc_region->allocate(word_size); | |
681 assert(result != NULL, "we just allocate out of an empty region " | |
682 "so allocation should have been successful"); | |
683 assert(is_in(result), "result should be in the heap"); | |
684 | |
685 _cur_alloc_region = new_cur_alloc_region; | |
686 | |
687 if (!at_safepoint) { | |
688 Heap_lock->unlock(); | |
689 } | |
690 | |
691 // do the dirtying, if necessary, after we release the Heap_lock | |
692 if (do_dirtying) { | |
693 dirty_young_block(result, word_size); | |
694 } | |
695 return result; | |
696 } | |
697 } | |
698 | |
699 assert(_cur_alloc_region == NULL, "we failed to allocate a new current " | |
700 "alloc region, it should still be NULL"); | |
701 assert_heap_locked_or_at_safepoint(); | |
702 return NULL; | |
703 } | |
704 | |
705 // See the comment in the .hpp file about the locking protocol and | |
706 // assumptions of this method (and other related ones). | |
707 HeapWord* | |
708 G1CollectedHeap::attempt_allocation_slow(size_t word_size) { | |
709 assert_heap_locked_and_not_at_safepoint(); | |
710 assert(!isHumongous(word_size), "attempt_allocation_slow() should not be " | |
711 "used for humongous allocations"); | |
712 | |
713 // We will loop while succeeded is false, which means that we tried | |
714 // to do a collection, but the VM op did not succeed. So, when we | |
715 // exit the loop, either one of the allocation attempts was | |
716 // successful, or we succeeded in doing the VM op but which was | |
717 // unable to allocate after the collection. | |
718 for (int try_count = 1; /* we'll return or break */; try_count += 1) { | |
719 bool succeeded = true; | |
720 | |
721 { | |
722 // We may have concurrent cleanup working at the time. Wait for | |
723 // it to complete. In the future we would probably want to make | |
724 // the concurrent cleanup truly concurrent by decoupling it from | |
725 // the allocation. This will happen in the near future as part | |
726 // of CR 6977804 which will revamp the operation of the free | |
727 // region list. The fact that wait_for_cleanup_complete() will | |
728 // do a wait() means that we'll give up the Heap_lock. So, it's | |
729 // possible that when we exit wait_for_cleanup_complete() we | |
730 // might be able to allocate successfully (since somebody else | |
731 // might have done a collection meanwhile). So, we'll attempt to | |
732 // allocate again, just in case. When we make cleanup truly | |
733 // concurrent with allocation, we should remove this allocation | |
734 // attempt as it's redundant (we only reach here after an | |
735 // allocation attempt has been unsuccessful). | |
736 wait_for_cleanup_complete(); | |
737 HeapWord* result = attempt_allocation(word_size); | |
738 if (result != NULL) { | |
739 assert_heap_not_locked(); | |
740 return result; | |
342 | 741 } |
742 } | |
1973 | 743 |
744 if (GC_locker::is_active_and_needs_gc()) { | |
1991
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
745 // We are locked out of GC because of the GC locker. We can |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
746 // allocate a new region only if we can expand the young gen. |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
747 |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
748 if (g1_policy()->can_expand_young_list()) { |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
749 // Yes, we are allowed to expand the young gen. Let's try to |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
750 // allocate a new current alloc region. |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
751 |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
752 HeapWord* result = |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
753 replace_cur_alloc_region_and_allocate(word_size, |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
754 false, /* at_safepoint */ |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
755 true, /* do_dirtying */ |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
756 true /* can_expand */); |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
757 if (result != NULL) { |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
758 assert_heap_not_locked(); |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
759 return result; |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
760 } |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
761 } |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
762 // We could not expand the young gen further (or we could but we |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
763 // failed to allocate a new region). We'll stall until the GC |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
764 // locker forces a GC. |
1973 | 765 |
766 // If this thread is not in a jni critical section, we stall | |
767 // the requestor until the critical section has cleared and | |
768 // GC allowed. When the critical section clears, a GC is | |
769 // initiated by the last thread exiting the critical section; so | |
770 // we retry the allocation sequence from the beginning of the loop, | |
771 // rather than causing more, now probably unnecessary, GC attempts. | |
772 JavaThread* jthr = JavaThread::current(); | |
773 assert(jthr != NULL, "sanity"); | |
774 if (!jthr->in_critical()) { | |
775 MutexUnlocker mul(Heap_lock); | |
776 GC_locker::stall_until_clear(); | |
777 | |
778 // We'll then fall off the end of the ("if GC locker active") | |
779 // if-statement and retry the allocation further down in the | |
780 // loop. | |
781 } else { | |
782 if (CheckJNICalls) { | |
783 fatal("Possible deadlock due to allocating while" | |
784 " in jni critical section"); | |
785 } | |
786 return NULL; | |
1666
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
787 } |
1973 | 788 } else { |
789 // We are not locked out. So, let's try to do a GC. The VM op | |
790 // will retry the allocation before it completes. | |
791 | |
792 // Read the GC count while holding the Heap_lock | |
793 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); | |
794 | |
795 Heap_lock->unlock(); | |
796 | |
797 HeapWord* result = | |
798 do_collection_pause(word_size, gc_count_before, &succeeded); | |
799 assert_heap_not_locked(); | |
800 if (result != NULL) { | |
801 assert(succeeded, "the VM op should have succeeded"); | |
802 | |
803 // Allocations that take place on VM operations do not do any | |
804 // card dirtying and we have to do it here. | |
805 dirty_young_block(result, word_size); | |
806 return result; | |
807 } | |
808 | |
809 Heap_lock->lock(); | |
810 } | |
811 | |
812 assert_heap_locked(); | |
813 | |
814 // We can reach here when we were unsuccessful in doing a GC, | |
815 // because another thread beat us to it, or because we were locked | |
816 // out of GC due to the GC locker. In either case a new alloc | |
817 // region might be available so we will retry the allocation. | |
818 HeapWord* result = attempt_allocation(word_size); | |
819 if (result != NULL) { | |
820 assert_heap_not_locked(); | |
821 return result; | |
822 } | |
823 | |
824 // So far our attempts to allocate failed. The only time we'll go | |
825 // around the loop and try again is if we tried to do a GC and the | |
826 // VM op that we tried to schedule was not successful because | |
827 // another thread beat us to it. If that happened it's possible | |
828 // that by the time we grabbed the Heap_lock again and tried to | |
829 // allocate other threads filled up the young generation, which | |
830 // means that the allocation attempt after the GC also failed. So, | |
831 // it's worth trying to schedule another GC pause. | |
832 if (succeeded) { | |
833 break; | |
834 } | |
835 | |
836 // Give a warning if we seem to be looping forever. | |
837 if ((QueuedAllocationWarningCount > 0) && | |
838 (try_count % QueuedAllocationWarningCount == 0)) { | |
839 warning("G1CollectedHeap::attempt_allocation_slow() " | |
840 "retries %d times", try_count); | |
342 | 841 } |
842 } | |
843 | |
1973 | 844 assert_heap_locked(); |
845 return NULL; | |
846 } | |
847 | |
848 // See the comment in the .hpp file about the locking protocol and | |
849 // assumptions of this method (and other related ones). | |
850 HeapWord* | |
851 G1CollectedHeap::attempt_allocation_humongous(size_t word_size, | |
852 bool at_safepoint) { | |
853 // This is the method that will allocate a humongous object. All | |
854 // allocation paths that attempt to allocate a humongous object | |
855 // should eventually reach here. Currently, the only paths are from | |
856 // mem_allocate() and attempt_allocation_at_safepoint(). | |
857 assert_heap_locked_or_at_safepoint(); | |
858 assert(isHumongous(word_size), "attempt_allocation_humongous() " | |
859 "should only be used for humongous allocations"); | |
860 assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, | |
861 "at_safepoint and is_at_safepoint() should be a tautology"); | |
862 | |
863 HeapWord* result = NULL; | |
864 | |
865 // We will loop while succeeded is false, which means that we tried | |
866 // to do a collection, but the VM op did not succeed. So, when we | |
867 // exit the loop, either one of the allocation attempts was | |
868 // successful, or we succeeded in doing the VM op but which was | |
869 // unable to allocate after the collection. | |
870 for (int try_count = 1; /* we'll return or break */; try_count += 1) { | |
871 bool succeeded = true; | |
872 | |
873 // Given that humongous objects are not allocated in young | |
874 // regions, we'll first try to do the allocation without doing a | |
875 // collection hoping that there's enough space in the heap. | |
876 result = humongous_obj_allocate(word_size); | |
877 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), | |
878 "catch a regression of this bug."); | |
879 if (result != NULL) { | |
880 if (!at_safepoint) { | |
881 // If we're not at a safepoint, unlock the Heap_lock. | |
882 Heap_lock->unlock(); | |
883 } | |
884 return result; | |
885 } | |
886 | |
887 // If we failed to allocate the humongous object, we should try to | |
888 // do a collection pause (if we're allowed) in case it reclaims | |
889 // enough space for the allocation to succeed after the pause. | |
890 if (!at_safepoint) { | |
891 // Read the GC count while holding the Heap_lock | |
892 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); | |
893 | |
894 // If we're allowed to do a collection we're not at a | |
895 // safepoint, so it is safe to unlock the Heap_lock. | |
342 | 896 Heap_lock->unlock(); |
1973 | 897 |
898 result = do_collection_pause(word_size, gc_count_before, &succeeded); | |
899 assert_heap_not_locked(); | |
900 if (result != NULL) { | |
901 assert(succeeded, "the VM op should have succeeded"); | |
902 return result; | |
903 } | |
904 | |
905 // If we get here, the VM operation either did not succeed | |
906 // (i.e., another thread beat us to it) or it succeeded but | |
907 // failed to allocate the object. | |
908 | |
909 // If we're allowed to do a collection we're not at a | |
910 // safepoint, so it is safe to lock the Heap_lock. | |
911 Heap_lock->lock(); | |
912 } | |
913 | |
914 assert(result == NULL, "otherwise we should have exited the loop earlier"); | |
915 | |
916 // So far our attempts to allocate failed. The only time we'll go | |
917 // around the loop and try again is if we tried to do a GC and the | |
918 // VM op that we tried to schedule was not successful because | |
919 // another thread beat us to it. That way it's possible that some | |
920 // space was freed up by the thread that successfully scheduled a | |
921 // GC. So it's worth trying to allocate again. | |
922 if (succeeded) { | |
923 break; | |
342 | 924 } |
925 | |
1973 | 926 // Give a warning if we seem to be looping forever. |
927 if ((QueuedAllocationWarningCount > 0) && | |
928 (try_count % QueuedAllocationWarningCount == 0)) { | |
929 warning("G1CollectedHeap::attempt_allocation_humongous " | |
930 "retries %d times", try_count); | |
931 } | |
932 } | |
933 | |
934 assert_heap_locked_or_at_safepoint(); | |
935 return NULL; | |
936 } | |
937 | |
938 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, | |
939 bool expect_null_cur_alloc_region) { | |
940 assert_at_safepoint(); | |
941 assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region, | |
1975
d9310331a29c
7003860: G1: assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region) fails
tonyp
parents:
1974
diff
changeset
|
942 err_msg("the current alloc region was unexpectedly found " |
d9310331a29c
7003860: G1: assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region) fails
tonyp
parents:
1974
diff
changeset
|
943 "to be non-NULL, cur alloc region: "PTR_FORMAT" " |
d9310331a29c
7003860: G1: assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region) fails
tonyp
parents:
1974
diff
changeset
|
944 "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT, |
d9310331a29c
7003860: G1: assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region) fails
tonyp
parents:
1974
diff
changeset
|
945 _cur_alloc_region, expect_null_cur_alloc_region, word_size)); |
1973 | 946 |
947 if (!isHumongous(word_size)) { | |
948 if (!expect_null_cur_alloc_region) { | |
949 HeapRegion* cur_alloc_region = _cur_alloc_region; | |
950 if (cur_alloc_region != NULL) { | |
951 // This allocate method does BOT updates and we don't need them in | |
952 // the young generation. This will be fixed in the near future by | |
953 // CR 6994297. | |
954 HeapWord* result = cur_alloc_region->allocate(word_size); | |
955 if (result != NULL) { | |
956 assert(is_in(result), "result should be in the heap"); | |
957 | |
958 // We will not do any dirtying here. This is guaranteed to be | |
959 // called during a safepoint and the thread that scheduled the | |
960 // pause will do the dirtying if we return a non-NULL result. | |
961 return result; | |
962 } | |
963 | |
964 retire_cur_alloc_region_common(cur_alloc_region); | |
965 } | |
342 | 966 } |
1973 | 967 |
968 assert(_cur_alloc_region == NULL, | |
969 "at this point we should have no cur alloc region"); | |
970 return replace_cur_alloc_region_and_allocate(word_size, | |
971 true, /* at_safepoint */ | |
1991
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
972 false /* do_dirtying */, |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
973 false /* can_expand */); |
1973 | 974 } else { |
975 return attempt_allocation_humongous(word_size, | |
976 true /* at_safepoint */); | |
977 } | |
978 | |
979 ShouldNotReachHere(); | |
980 } | |
981 | |
982 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { | |
983 assert_heap_not_locked_and_not_at_safepoint(); | |
984 assert(!isHumongous(word_size), "we do not allow TLABs of humongous size"); | |
985 | |
986 Heap_lock->lock(); | |
987 | |
988 // First attempt: try allocating out of the current alloc region or | |
989 // after replacing the current alloc region. | |
990 HeapWord* result = attempt_allocation(word_size); | |
991 if (result != NULL) { | |
992 assert_heap_not_locked(); | |
993 return result; | |
994 } | |
995 | |
996 assert_heap_locked(); | |
997 | |
998 // Second attempt: go into the even slower path where we might | |
999 // try to schedule a collection. | |
1000 result = attempt_allocation_slow(word_size); | |
1001 if (result != NULL) { | |
1002 assert_heap_not_locked(); | |
1003 return result; | |
1004 } | |
1005 | |
1006 assert_heap_locked(); | |
1007 Heap_lock->unlock(); | |
1008 return NULL; | |
342 | 1009 } |
1010 | |
1011 HeapWord* | |
1012 G1CollectedHeap::mem_allocate(size_t word_size, | |
1013 bool is_noref, | |
1014 bool is_tlab, | |
1973 | 1015 bool* gc_overhead_limit_was_exceeded) { |
1016 assert_heap_not_locked_and_not_at_safepoint(); | |
1017 assert(!is_tlab, "mem_allocate() this should not be called directly " | |
1018 "to allocate TLABs"); | |
342 | 1019 |
1020 // Loop until the allocation is satisified, | |
1021 // or unsatisfied after GC. | |
1973 | 1022 for (int try_count = 1; /* we'll return */; try_count += 1) { |
1023 unsigned int gc_count_before; | |
342 | 1024 { |
1025 Heap_lock->lock(); | |
1973 | 1026 |
1027 if (!isHumongous(word_size)) { | |
1028 // First attempt: try allocating out of the current alloc | |
1029 // region or after replacing the current alloc region. | |
1030 HeapWord* result = attempt_allocation(word_size); | |
1031 if (result != NULL) { | |
1032 assert_heap_not_locked(); | |
1033 return result; | |
1034 } | |
1035 | |
1036 assert_heap_locked(); | |
1037 | |
1038 // Second attempt: go into the even slower path where we might | |
1039 // try to schedule a collection. | |
1040 result = attempt_allocation_slow(word_size); | |
1041 if (result != NULL) { | |
1042 assert_heap_not_locked(); | |
1043 return result; | |
1044 } | |
1045 } else { | |
1046 HeapWord* result = attempt_allocation_humongous(word_size, | |
1047 false /* at_safepoint */); | |
1048 if (result != NULL) { | |
1049 assert_heap_not_locked(); | |
1050 return result; | |
1051 } | |
342 | 1052 } |
1973 | 1053 |
1054 assert_heap_locked(); | |
342 | 1055 // Read the gc count while the heap lock is held. |
1056 gc_count_before = SharedHeap::heap()->total_collections(); | |
1973 | 1057 // We cannot be at a safepoint, so it is safe to unlock the Heap_lock |
342 | 1058 Heap_lock->unlock(); |
1059 } | |
1060 | |
1061 // Create the garbage collection operation... | |
1973 | 1062 VM_G1CollectForAllocation op(gc_count_before, word_size); |
342 | 1063 // ...and get the VM thread to execute it. |
1064 VMThread::execute(&op); | |
1973 | 1065 |
1066 assert_heap_not_locked(); | |
1067 if (op.prologue_succeeded() && op.pause_succeeded()) { | |
1068 // If the operation was successful we'll return the result even | |
1069 // if it is NULL. If the allocation attempt failed immediately | |
1070 // after a Full GC, it's unlikely we'll be able to allocate now. | |
1071 HeapWord* result = op.result(); | |
1072 if (result != NULL && !isHumongous(word_size)) { | |
1073 // Allocations that take place on VM operations do not do any | |
1074 // card dirtying and we have to do it here. We only have to do | |
1075 // this for non-humongous allocations, though. | |
1076 dirty_young_block(result, word_size); | |
1077 } | |
342 | 1078 return result; |
1973 | 1079 } else { |
1080 assert(op.result() == NULL, | |
1081 "the result should be NULL if the VM op did not succeed"); | |
342 | 1082 } |
1083 | |
1084 // Give a warning if we seem to be looping forever. | |
1085 if ((QueuedAllocationWarningCount > 0) && | |
1086 (try_count % QueuedAllocationWarningCount == 0)) { | |
1973 | 1087 warning("G1CollectedHeap::mem_allocate retries %d times", try_count); |
342 | 1088 } |
1089 } | |
1973 | 1090 |
1091 ShouldNotReachHere(); | |
342 | 1092 } |
1093 | |
// Retires and forgets the current allocation region (if any) so that a
// fresh region will be chosen on the next allocation attempt. An empty
// region goes straight back to the free set; a non-empty one keeps its
// used bytes accounted for in _summary_bytes_used (and, if young, is
// added to the incremental collection set).
void G1CollectedHeap::abandon_cur_alloc_region() {
  if (_cur_alloc_region != NULL) {
    // We're finished with the _cur_alloc_region.
    if (_cur_alloc_region->is_empty()) {
      // Nothing was ever allocated in it: return it to the free set.
      _free_regions++;
      free_region(_cur_alloc_region);
    } else {
      // As we're building (at least the young portion) of the collection
      // set incrementally we'll add the current allocation region to
      // the collection set here.
      if (_cur_alloc_region->is_young()) {
        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
      }
      // The region holds live data; keep its bytes in the heap's
      // used-bytes summary.
      _summary_bytes_used += _cur_alloc_region->used();
    }
    _cur_alloc_region = NULL;
  }
}
1112 | |
// Releases all GC allocation regions ("totally", i.e. they are not kept
// for the next evacuation pause). The GC alloc region list itself must
// already have been emptied by the time this is called.
void G1CollectedHeap::abandon_gc_alloc_regions() {
  // first, make sure that the GC alloc region list is empty (it should!)
  assert(_gc_alloc_region_list == NULL, "invariant");
  release_gc_alloc_regions(true /* totally */);
}
1118 | |
342 | 1119 class PostMCRemSetClearClosure: public HeapRegionClosure { |
1120 ModRefBarrierSet* _mr_bs; | |
1121 public: | |
1122 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1123 bool doHeapRegion(HeapRegion* r) { | |
1124 r->reset_gc_time_stamp(); | |
1125 if (r->continuesHumongous()) | |
1126 return false; | |
1127 HeapRegionRemSet* hrrs = r->rem_set(); | |
1128 if (hrrs != NULL) hrrs->clear(); | |
1129 // You might think here that we could clear just the cards | |
1130 // corresponding to the used region. But no: if we leave a dirty card | |
1131 // in a region we might allocate into, then it would prevent that card | |
1132 // from being enqueued, and cause it to be missed. | |
1133 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
1134 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
1135 return false; | |
1136 } | |
1137 }; | |
1138 | |
1139 | |
1140 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
1141 ModRefBarrierSet* _mr_bs; | |
1142 public: | |
1143 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1144 bool doHeapRegion(HeapRegion* r) { | |
1145 if (r->continuesHumongous()) return false; | |
1146 if (r->used_region().word_size() != 0) { | |
1147 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
1148 } | |
1149 return false; | |
1150 } | |
1151 }; | |
1152 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1153 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1154 G1CollectedHeap* _g1h; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1155 UpdateRSOopClosure _cl; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1156 int _worker_i; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1157 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1158 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
1861 | 1159 _cl(g1->g1_rem_set(), worker_i), |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1160 _worker_i(worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1161 _g1h(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1162 { } |
1960
878b57474103
6978187: G1: assert(ParallelGCThreads> 1 || n_yielded() == _hrrs->occupied()) strikes again
johnc
parents:
1883
diff
changeset
|
1163 |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1164 bool doHeapRegion(HeapRegion* r) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1165 if (!r->continuesHumongous()) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1166 _cl.set_from(r); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1167 r->oop_iterate(&_cl); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1168 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1169 return false; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1170 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1171 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1172 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
// Gang task that rebuilds remembered sets in parallel after a full GC:
// each worker runs its own RebuildRSOutOfRegionClosure over the chunk of
// heap regions it claims under HeapRegion::RebuildRSClaimValue.
class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
public:
  ParRebuildRSTask(G1CollectedHeap* g1)
    : AbstractGangTask("ParRebuildRSTask"),
      _g1(g1)
  { }

  // Called once per worker; i is the worker id and is also passed to the
  // chunked iterator so each worker claims a disjoint set of regions.
  void work(int i) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
                                         HeapRegion::RebuildRSClaimValue);
  }
};
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1187 |
// Performs a stop-the-world full (mark-sweep-compact) collection via
// G1MarkSweep::invoke_at_safepoint(). Must be called at a safepoint by
// the VM thread.
//
//   explicit_gc         - the GC was explicitly requested; when true the
//                         post-GC heap resize uses a pending-allocation
//                         size of 0 instead of word_size.
//   clear_all_soft_refs - clear all soft references (can also be forced
//                         by collector_policy()->should_clear_all_soft_refs()).
//   word_size           - size of the allocation that prompted the GC,
//                         fed to resize_if_necessary_after_full_collection().
//
// Returns false if the collection was skipped because the GC locker is
// active; true once the collection has completed.
bool G1CollectedHeap::do_collection(bool explicit_gc,
                                    bool clear_all_soft_refs,
                                    size_t word_size) {
  // If the GC locker holds the heap, bail out; the caller must cope with
  // a collection that did not happen.
  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ResourceMark rm;

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");

  // Policy may force soft-reference clearing even if the caller did not
  // request it.
  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  {
    IsGCActiveMark x;

    // Timing
    bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
    assert(!system_gc || explicit_gc, "invariant");
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
                PrintGC, true, gclog_or_tty);

    TraceMemoryManagerStats tms(true /* fullGC */);

    // NOTE(review): 'start' (and 'end' below) appear unused in this body;
    // the policy record_* calls do the actual time-keeping. Confirm before
    // removing.
    double start = os::elapsedTime();
    g1_policy()->record_full_collection_start();

    gc_prologue(true);
    increment_total_collections(true /* full gc */);

    size_t g1h_prev_used = used();
    assert(used() == recalculate_used(), "Should be equal");

    if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      prepare_for_verify();
      gclog_or_tty->print(" VerifyBeforeGC:");
      Universe::verify(true);
    }
    assert(regions_accounted_for(), "Region leakage!");

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    // We want to discover references, but not process them yet.
    // This mode is disabled in
    // instanceRefKlass::process_discovered_references if the
    // generation does some collection work, or
    // instanceRefKlass::enqueue_discovered_references if the
    // generation returns without doing any work.
    ref_processor()->disable_discovery();
    ref_processor()->abandon_partial_discovery();
    ref_processor()->verify_no_references_recorded();

    // Abandon current iterations of concurrent marking and concurrent
    // refinement, if any are in progress.
    concurrent_mark()->abort();

    // Make sure we'll choose a new allocation region afterwards.
    abandon_cur_alloc_region();
    abandon_gc_alloc_regions();
    assert(_cur_alloc_region == NULL, "Invariant.");
    g1_rem_set()->cleanupHRRS();
    tear_down_region_lists();
    set_used_regions_to_need_zero_fill();

    // We may have added regions to the current incremental collection
    // set between the last GC or pause and now. We need to clear the
    // incremental collection set and then start rebuilding it afresh
    // after this full GC.
    abandon_collection_set(g1_policy()->inc_cset_head());
    g1_policy()->clear_incremental_cset();
    g1_policy()->stop_incremental_cset_building();

    if (g1_policy()->in_young_gc_mode()) {
      empty_young_list();
      g1_policy()->set_full_young_gcs(true);
    }

    // See the comment in G1CollectedHeap::ref_processing_init() about
    // how reference processing currently works in G1.

    // Temporarily make reference _discovery_ single threaded (non-MT).
    ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);

    // Temporarily make refs discovery atomic
    ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);

    // Temporarily clear _is_alive_non_header
    ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(do_clear_all_soft_refs);

    // Do collection work
    {
      HandleMark hm;  // Discard invalid handles created during gc
      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
    }
    // Because freeing humongous regions may have added some unclean
    // regions, it is necessary to tear down again before rebuilding.
    tear_down_region_lists();
    rebuild_region_lists();

    _summary_bytes_used = recalculate_used();

    ref_processor()->enqueue_discovered_references();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    MemoryService::track_memory_usage();

    if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      gclog_or_tty->print(" VerifyAfterGC:");
      prepare_for_verify();
      Universe::verify(false);
    }
    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

    reset_gc_time_stamp();
    // Since everything potentially moved, we will clear all remembered
    // sets, and clear all cards.  Later we will rebuild remembered
    // sets. We will also reset the GC time stamps of the regions.
    PostMCRemSetClearClosure rs_clear(mr_bs());
    heap_region_iterate(&rs_clear);

    // Resize the heap if necessary. For an explicit GC there is no
    // pending allocation, so resize as if word_size were 0.
    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);

    if (_cg1r->use_cache()) {
      _cg1r->clear_and_record_card_counts();
      _cg1r->clear_hot_cache();
    }

    // Rebuild remembered sets of all regions.

    if (G1CollectedHeap::use_parallel_gc_threads()) {
      ParRebuildRSTask rebuild_rs_task(this);
      assert(check_heap_region_claim_values(
             HeapRegion::InitialClaimValue), "sanity check");
      set_par_threads(workers()->total_workers());
      workers()->run_task(&rebuild_rs_task);
      set_par_threads(0);
      assert(check_heap_region_claim_values(
             HeapRegion::RebuildRSClaimValue), "sanity check");
      reset_heap_region_claim_values();
    } else {
      RebuildRSOutOfRegionClosure rebuild_rs(this);
      heap_region_iterate(&rebuild_rs);
    }

    if (PrintGC) {
      print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
    }

    if (true) { // FIXME
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
    }

    // Start a new incremental collection set for the next pause
    assert(g1_policy()->collection_set() == NULL, "must be");
    g1_policy()->start_incremental_cset_building();

    // Clear the _cset_fast_test bitmap in anticipation of adding
    // regions to the incremental collection set for the next
    // evacuation pause.
    clear_cset_fast_test();

    double end = os::elapsedTime();
    g1_policy()->record_full_collection_end();

#ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
#endif

    gc_epilogue(true);

    // Discard all rset updates
    JavaThread::dirty_card_queue_set().abandon_logs();
    assert(!G1DeferredRSUpdate
           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
    assert(regions_accounted_for(), "Region leakage!");
  }

  if (g1_policy()->in_young_gc_mode()) {
    _young_list->reset_sampled_info();
    // At this point there should be no regions in the
    // entire heap tagged as young.
    assert( check_young_list_empty(true /* check_heap */),
            "young list should be empty at this point");
  }

  // Update the number of full collections that have been completed.
  increment_full_collections_completed(false /* concurrent */);

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  return true;
}
1400 | |
1401 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1973 | 1402 // do_collection() will return whether it succeeded in performing |
1403 // the GC. Currently, there is no facility on the | |
1404 // do_full_collection() API to notify the caller than the collection | |
1405 // did not succeed (e.g., because it was locked out by the GC | |
1406 // locker). So, right now, we'll ignore the return value. | |
1407 bool dummy = do_collection(true, /* explicit_gc */ | |
1408 clear_all_soft_refs, | |
1409 0 /* word_size */); | |
342 | 1410 } |
1411 | |
1412 // This code is mostly copied from TenuredGeneration. | |
1413 void | |
1414 G1CollectedHeap:: | |
1415 resize_if_necessary_after_full_collection(size_t word_size) { | |
1416 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1417 | |
1418 // Include the current allocation, if any, and bytes that will be | |
1419 // pre-allocated to support collections, as "used". | |
1420 const size_t used_after_gc = used(); | |
1421 const size_t capacity_after_gc = capacity(); | |
1422 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1423 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1424 // This is enforced in arguments.cpp. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1425 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1426 "otherwise the code below doesn't make sense"); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1427 |
342 | 1428 // We don't have floating point command-line arguments |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1429 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; |
342 | 1430 const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1431 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; |
342 | 1432 const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
1433 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1434 const size_t min_heap_size = collector_policy()->min_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1435 const size_t max_heap_size = collector_policy()->max_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1436 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1437 // We have to be careful here as these two calculations can overflow |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1438 // 32-bit size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1439 double used_after_gc_d = (double) used_after_gc; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1440 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1441 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1442 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1443 // Let's make sure that they are both under the max heap size, which |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1444 // by default will make them fit into a size_t. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1445 double desired_capacity_upper_bound = (double) max_heap_size; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1446 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1447 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1448 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1449 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1450 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1451 // We can now safely turn them into size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1452 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1453 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1454 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1455 // This assert only makes sense here, before we adjust them |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1456 // with respect to the min and max heap size. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1457 assert(minimum_desired_capacity <= maximum_desired_capacity, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1458 err_msg("minimum_desired_capacity = "SIZE_FORMAT", " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1459 "maximum_desired_capacity = "SIZE_FORMAT, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1460 minimum_desired_capacity, maximum_desired_capacity)); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1461 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1462 // Should not be greater than the heap max size. No need to adjust |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1463 // it with respect to the heap min size as it's a lower bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1464 // we'll try to make the capacity larger than it, not smaller). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1465 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1466 // Should not be less than the heap min size. No need to adjust it |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1467 // with respect to the heap max size as it's an upper bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1468 // we'll try to make the capacity smaller than it, not greater). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1469 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); |
342 | 1470 |
1471 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1472 const double free_percentage = |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1473 (double) free_after_gc / (double) capacity_after_gc; |
342 | 1474 gclog_or_tty->print_cr("Computing new size after full GC "); |
1475 gclog_or_tty->print_cr(" " | |
1476 " minimum_free_percentage: %6.2f", | |
1477 minimum_free_percentage); | |
1478 gclog_or_tty->print_cr(" " | |
1479 " maximum_free_percentage: %6.2f", | |
1480 maximum_free_percentage); | |
1481 gclog_or_tty->print_cr(" " | |
1482 " capacity: %6.1fK" | |
1483 " minimum_desired_capacity: %6.1fK" | |
1484 " maximum_desired_capacity: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1485 (double) capacity_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1486 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1487 (double) maximum_desired_capacity / (double) K); |
342 | 1488 gclog_or_tty->print_cr(" " |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1489 " free_after_gc: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1490 " used_after_gc: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1491 (double) free_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1492 (double) used_after_gc / (double) K); |
342 | 1493 gclog_or_tty->print_cr(" " |
1494 " free_percentage: %6.2f", | |
1495 free_percentage); | |
1496 } | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1497 if (capacity_after_gc < minimum_desired_capacity) { |
342 | 1498 // Don't expand unless it's significant |
1499 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
1500 expand(expand_bytes); | |
1501 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1502 gclog_or_tty->print_cr(" " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1503 " expanding:" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1504 " max_heap_size: %6.1fK" |
342 | 1505 " minimum_desired_capacity: %6.1fK" |
1506 " expand_bytes: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1507 (double) max_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1508 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1509 (double) expand_bytes / (double) K); |
342 | 1510 } |
1511 | |
1512 // No expansion, now see if we want to shrink | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1513 } else if (capacity_after_gc > maximum_desired_capacity) { |
342 | 1514 // Capacity too large, compute shrinking size |
1515 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1516 shrink(shrink_bytes); | |
1517 if (PrintGC && Verbose) { | |
1518 gclog_or_tty->print_cr(" " | |
1519 " shrinking:" | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1520 " min_heap_size: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1521 " maximum_desired_capacity: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1522 " shrink_bytes: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1523 (double) min_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1524 (double) maximum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1525 (double) shrink_bytes / (double) K); |
342 | 1526 } |
1527 } | |
1528 } | |
1529 | |
1530 | |
1531 HeapWord* | |
1973 | 1532 G1CollectedHeap::satisfy_failed_allocation(size_t word_size, |
1533 bool* succeeded) { | |
1534 assert(SafepointSynchronize::is_at_safepoint(), | |
1535 "satisfy_failed_allocation() should only be called at a safepoint"); | |
1536 assert(Thread::current()->is_VM_thread(), | |
1537 "satisfy_failed_allocation() should only be called by the VM thread"); | |
1538 | |
1539 *succeeded = true; | |
1540 // Let's attempt the allocation first. | |
1541 HeapWord* result = attempt_allocation_at_safepoint(word_size, | |
1542 false /* expect_null_cur_alloc_region */); | |
1543 if (result != NULL) { | |
1544 assert(*succeeded, "sanity"); | |
1545 return result; | |
1546 } | |
342 | 1547 |
1548 // In a G1 heap, we're supposed to keep allocation from failing by | |
1549 // incremental pauses. Therefore, at least for now, we'll favor | |
1550 // expansion over collection. (This might change in the future if we can | |
1551 // do something smarter than full collection to satisfy a failed alloc.) | |
1552 result = expand_and_allocate(word_size); | |
1553 if (result != NULL) { | |
1973 | 1554 assert(*succeeded, "sanity"); |
342 | 1555 return result; |
1556 } | |
1557 | |
1973 | 1558 // Expansion didn't work, we'll try to do a Full GC. |
1559 bool gc_succeeded = do_collection(false, /* explicit_gc */ | |
1560 false, /* clear_all_soft_refs */ | |
1561 word_size); | |
1562 if (!gc_succeeded) { | |
1563 *succeeded = false; | |
1564 return NULL; | |
1565 } | |
1566 | |
1567 // Retry the allocation | |
1568 result = attempt_allocation_at_safepoint(word_size, | |
1569 true /* expect_null_cur_alloc_region */); | |
342 | 1570 if (result != NULL) { |
1973 | 1571 assert(*succeeded, "sanity"); |
342 | 1572 return result; |
1573 } | |
1574 | |
1973 | 1575 // Then, try a Full GC that will collect all soft references. |
1576 gc_succeeded = do_collection(false, /* explicit_gc */ | |
1577 true, /* clear_all_soft_refs */ | |
1578 word_size); | |
1579 if (!gc_succeeded) { | |
1580 *succeeded = false; | |
1581 return NULL; | |
1582 } | |
1583 | |
1584 // Retry the allocation once more | |
1585 result = attempt_allocation_at_safepoint(word_size, | |
1586 true /* expect_null_cur_alloc_region */); | |
342 | 1587 if (result != NULL) { |
1973 | 1588 assert(*succeeded, "sanity"); |
342 | 1589 return result; |
1590 } | |
1591 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1592 assert(!collector_policy()->should_clear_all_soft_refs(), |
1973 | 1593 "Flag should have been handled and cleared prior to this point"); |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1594 |
342 | 1595 // What else? We might try synchronous finalization later. If the total |
1596 // space available is large enough for the allocation, then a more | |
1597 // complete compaction phase than we've tried so far might be | |
1598 // appropriate. | |
1973 | 1599 assert(*succeeded, "sanity"); |
342 | 1600 return NULL; |
1601 } | |
1602 | |
1603 // Attempting to expand the heap sufficiently | |
1604 // to support an allocation of the given "word_size". If | |
1605 // successful, perform the allocation and return the address of the | |
1606 // allocated block, or else "NULL". | |
1607 | |
1608 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
1973 | 1609 assert(SafepointSynchronize::is_at_safepoint(), |
1610 "expand_and_allocate() should only be called at a safepoint"); | |
1611 assert(Thread::current()->is_VM_thread(), | |
1612 "expand_and_allocate() should only be called by the VM thread"); | |
1613 | |
342 | 1614 size_t expand_bytes = word_size * HeapWordSize; |
1615 if (expand_bytes < MinHeapDeltaBytes) { | |
1616 expand_bytes = MinHeapDeltaBytes; | |
1617 } | |
1618 expand(expand_bytes); | |
1619 assert(regions_accounted_for(), "Region leakage!"); | |
1973 | 1620 |
1621 return attempt_allocation_at_safepoint(word_size, | |
1975
d9310331a29c
7003860: G1: assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region) fails
tonyp
parents:
1974
diff
changeset
|
1622 false /* expect_null_cur_alloc_region */); |
342 | 1623 } |
1624 | |
1625 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { | |
1626 size_t pre_used = 0; | |
1627 size_t cleared_h_regions = 0; | |
1628 size_t freed_regions = 0; | |
1629 UncleanRegionList local_list; | |
1630 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, | |
1631 freed_regions, &local_list); | |
1632 | |
1633 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
1634 &local_list); | |
1635 return pre_used; | |
1636 } | |
1637 | |
1638 void | |
1639 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, | |
1640 size_t& pre_used, | |
1641 size_t& cleared_h, | |
1642 size_t& freed_regions, | |
1643 UncleanRegionList* list, | |
1644 bool par) { | |
1645 assert(!hr->continuesHumongous(), "should have filtered these out"); | |
1646 size_t res = 0; | |
677 | 1647 if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && |
1648 !hr->is_young()) { | |
1649 if (G1PolicyVerbose > 0) | |
1650 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" | |
1651 " during cleanup", hr, hr->used()); | |
1652 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); | |
342 | 1653 } |
1654 } | |
1655 | |
1656 // FIXME: both this and shrink could probably be more efficient by | |
1657 // doing one "VirtualSpace::expand_by" call rather than several. | |
1658 void G1CollectedHeap::expand(size_t expand_bytes) { | |
1659 size_t old_mem_size = _g1_storage.committed_size(); | |
1660 // We expand by a minimum of 1K. | |
1661 expand_bytes = MAX2(expand_bytes, (size_t)K); | |
1662 size_t aligned_expand_bytes = | |
1663 ReservedSpace::page_align_size_up(expand_bytes); | |
1664 aligned_expand_bytes = align_size_up(aligned_expand_bytes, | |
1665 HeapRegion::GrainBytes); | |
1666 expand_bytes = aligned_expand_bytes; | |
1667 while (expand_bytes > 0) { | |
1668 HeapWord* base = (HeapWord*)_g1_storage.high(); | |
1669 // Commit more storage. | |
1670 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); | |
1671 if (!successful) { | |
1672 expand_bytes = 0; | |
1673 } else { | |
1674 expand_bytes -= HeapRegion::GrainBytes; | |
1675 // Expand the committed region. | |
1676 HeapWord* high = (HeapWord*) _g1_storage.high(); | |
1677 _g1_committed.set_end(high); | |
1678 // Create a new HeapRegion. | |
1679 MemRegion mr(base, high); | |
1680 bool is_zeroed = !_g1_max_committed.contains(base); | |
1681 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); | |
1682 | |
1683 // Now update max_committed if necessary. | |
1684 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); | |
1685 | |
1686 // Add it to the HeapRegionSeq. | |
1687 _hrs->insert(hr); | |
1688 // Set the zero-fill state, according to whether it's already | |
1689 // zeroed. | |
1690 { | |
1691 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
1692 if (is_zeroed) { | |
1693 hr->set_zero_fill_complete(); | |
1694 put_free_region_on_list_locked(hr); | |
1695 } else { | |
1696 hr->set_zero_fill_needed(); | |
1697 put_region_on_unclean_list_locked(hr); | |
1698 } | |
1699 } | |
1700 _free_regions++; | |
1701 // And we used up an expansion region to create it. | |
1702 _expansion_regions--; | |
1703 // Tell the cardtable about it. | |
1704 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1705 // And the offset table as well. | |
1706 _bot_shared->resize(_g1_committed.word_size()); | |
1707 } | |
1708 } | |
1709 if (Verbose && PrintGC) { | |
1710 size_t new_mem_size = _g1_storage.committed_size(); | |
1711 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", | |
1712 old_mem_size/K, aligned_expand_bytes/K, | |
1713 new_mem_size/K); | |
1714 } | |
1715 } | |
1716 | |
1717 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) | |
1718 { | |
1719 size_t old_mem_size = _g1_storage.committed_size(); | |
1720 size_t aligned_shrink_bytes = | |
1721 ReservedSpace::page_align_size_down(shrink_bytes); | |
1722 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1723 HeapRegion::GrainBytes); | |
1724 size_t num_regions_deleted = 0; | |
1725 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); | |
1726 | |
1727 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1728 if (mr.byte_size() > 0) | |
1729 _g1_storage.shrink_by(mr.byte_size()); | |
1730 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1731 | |
1732 _g1_committed.set_end(mr.start()); | |
1733 _free_regions -= num_regions_deleted; | |
1734 _expansion_regions += num_regions_deleted; | |
1735 | |
1736 // Tell the cardtable about it. | |
1737 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1738 | |
1739 // And the offset table as well. | |
1740 _bot_shared->resize(_g1_committed.word_size()); | |
1741 | |
1742 HeapRegionRemSet::shrink_heap(n_regions()); | |
1743 | |
1744 if (Verbose && PrintGC) { | |
1745 size_t new_mem_size = _g1_storage.committed_size(); | |
1746 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1747 old_mem_size/K, aligned_shrink_bytes/K, | |
1748 new_mem_size/K); | |
1749 } | |
1750 } | |
1751 | |
1752 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
636 | 1753 release_gc_alloc_regions(true /* totally */); |
342 | 1754 tear_down_region_lists(); // We will rebuild them in a moment. |
1755 shrink_helper(shrink_bytes); | |
1756 rebuild_region_lists(); | |
1757 } | |
1758 | |
1759 // Public methods. | |
1760 | |
1761 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1762 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1763 #endif // _MSC_VER | |
1764 | |
1765 | |
1766 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1767 SharedHeap(policy_), | |
1768 _g1_policy(policy_), | |
1111 | 1769 _dirty_card_queue_set(false), |
1705 | 1770 _into_cset_dirty_card_queue_set(false), |
342 | 1771 _ref_processor(NULL), |
1772 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1773 _bot_shared(NULL), | |
1774 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), | |
1775 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1776 _evac_failure_scan_stack(NULL) , | |
1777 _mark_in_progress(false), | |
1778 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), | |
1779 _cur_alloc_region(NULL), | |
1780 _refine_cte_cl(NULL), | |
1781 _free_region_list(NULL), _free_region_list_size(0), | |
1782 _free_regions(0), | |
1783 _full_collection(false), | |
1784 _unclean_region_list(), | |
1785 _unclean_regions_coming(false), | |
1786 _young_list(new YoungList(this)), | |
1787 _gc_time_stamp(0), | |
526 | 1788 _surviving_young_words(NULL), |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1789 _full_collections_completed(0), |
526 | 1790 _in_cset_fast_test(NULL), |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1791 _in_cset_fast_test_base(NULL), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1792 _dirty_cards_region_list(NULL) { |
342 | 1793 _g1h = this; // To catch bugs. |
1794 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1795 vm_exit_during_initialization("Failed necessary allocation."); | |
1796 } | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1797 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1798 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1799 |
342 | 1800 int n_queues = MAX2((int)ParallelGCThreads, 1); |
1801 _task_queues = new RefToScanQueueSet(n_queues); | |
1802 | |
1803 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1804 assert(n_rem_sets > 0, "Invariant."); | |
1805 | |
1806 HeapRegionRemSetIterator** iter_arr = | |
1807 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1808 for (int i = 0; i < n_queues; i++) { | |
1809 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1810 } | |
1811 _rem_set_iterator = iter_arr; | |
1812 | |
1813 for (int i = 0; i < n_queues; i++) { | |
1814 RefToScanQueue* q = new RefToScanQueue(); | |
1815 q->initialize(); | |
1816 _task_queues->register_queue(i, q); | |
1817 } | |
1818 | |
1819 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
636 | 1820 _gc_alloc_regions[ap] = NULL; |
1821 _gc_alloc_region_counts[ap] = 0; | |
1822 _retained_gc_alloc_regions[ap] = NULL; | |
1823 // by default, we do not retain a GC alloc region for each ap; | |
1824 // we'll override this, when appropriate, below | |
1825 _retain_gc_alloc_region[ap] = false; | |
1826 } | |
1827 | |
1828 // We will try to remember the last half-full tenured region we | |
1829 // allocated to at the end of a collection so that we can re-use it | |
1830 // during the next collection. | |
1831 _retain_gc_alloc_region[GCAllocForTenured] = true; | |
1832 | |
342 | 1833 guarantee(_task_queues != NULL, "task_queues allocation failure."); |
1834 } | |
1835 | |
1836 jint G1CollectedHeap::initialize() { | |
1166 | 1837 CollectedHeap::pre_initialize(); |
342 | 1838 os::enable_vtime(); |
1839 | |
1840 // Necessary to satisfy locking discipline assertions. | |
1841 | |
1842 MutexLocker x(Heap_lock); | |
1843 | |
1844 // While there are no constraints in the GC code that HeapWordSize | |
1845 // be any particular value, there are multiple other areas in the | |
1846 // system which believe this to be true (e.g. oop->object_size in some | |
1847 // cases incorrectly returns the size in wordSize units rather than | |
1848 // HeapWordSize). | |
1849 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1850 | |
1851 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1852 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1853 | |
1854 // Ensure that the sizes are properly aligned. | |
1855 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1856 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1857 | |
1858 _cg1r = new ConcurrentG1Refine(); | |
1859 | |
1860 // Reserve the maximum. | |
1861 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1862 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1863 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1864 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1865 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1866 |
342 | 1867 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1868 HeapRegion::GrainBytes, | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1869 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1870 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1871 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1872 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1873 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1874 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1875 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1876 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1877 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1878 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1879 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1880 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1881 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1882 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1883 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1884 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1885 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1886 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1887 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1888 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1889 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1890 } |
342 | 1891 |
1892 if (!heap_rs.is_reserved()) { | |
1893 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1894 return JNI_ENOMEM; | |
1895 } | |
1896 | |
1897 // It is important to do this in a way such that concurrent readers can't | |
1898 // temporarily think somethings in the heap. (I've actually seen this | |
1899 // happen in asserts: DLD.) | |
1900 _reserved.set_word_size(0); | |
1901 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1902 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1903 | |
1904 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1905 | |
1906 _num_humongous_regions = 0; | |
1907 | |
1908 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1909 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1910 set_barrier_set(rem_set()->bs()); | |
1911 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1912 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1913 } else { | |
1914 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1915 return JNI_ENOMEM; | |
1916 } | |
1917 | |
1918 // Also create a G1 rem set. | |
1861 | 1919 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
1920 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
342 | 1921 } else { |
1861 | 1922 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
1923 return JNI_ENOMEM; | |
342 | 1924 } |
1925 | |
1926 // Carve out the G1 part of the heap. | |
1927 | |
1928 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1929 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1930 g1_rs.size()/HeapWordSize); | |
1931 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1932 | |
1933 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1934 | |
1935 _g1_storage.initialize(g1_rs, 0); | |
1936 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1937 _g1_max_committed = _g1_committed; | |
393 | 1938 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1939 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1940 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
1941 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1942 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1943 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1944 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1945 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1946 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1947 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1948 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1949 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1950 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1951 |
342 | 1952 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1953 heap_word_size(init_byte_size)); | |
1954 | |
1955 _g1h = this; | |
1956 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1957 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1958 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1959 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1960 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1961 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1962 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1963 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1964 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1965 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1966 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1967 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1968 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1969 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1970 |
342 | 1971 // Create the ConcurrentMark data structure and thread. |
1972 // (Must do this late, so that "max_regions" is defined.) | |
1973 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1974 _cmThread = _cm->cmThread(); | |
1975 | |
1976 // ...and the concurrent zero-fill thread, if necessary. | |
1977 if (G1ConcZeroFill) { | |
1978 _czft = new ConcurrentZFThread(); | |
1979 } | |
1980 | |
1981 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1982 HeapRegionRemSet::init_heap(max_regions()); | |
1983 | |
677 | 1984 // Now expand into the initial heap size. |
1985 expand(init_byte_size); | |
342 | 1986 |
1987 // Perform any initialization actions delegated to the policy. | |
1988 g1_policy()->init(); | |
1989 | |
1990 g1_policy()->note_start_of_mark_thread(); | |
1991 | |
1992 _refine_cte_cl = | |
1993 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1994 g1_rem_set(), | |
1995 concurrent_g1_refine()); | |
1996 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1997 | |
1998 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1999 SATB_Q_FL_lock, | |
1111 | 2000 G1SATBProcessCompletedThreshold, |
342 | 2001 Shared_SATB_Q_lock); |
794 | 2002 |
2003 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
2004 DirtyCardQ_FL_lock, | |
1111 | 2005 concurrent_g1_refine()->yellow_zone(), |
2006 concurrent_g1_refine()->red_zone(), | |
794 | 2007 Shared_DirtyCardQ_lock); |
2008 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2009 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2010 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2011 DirtyCardQ_FL_lock, |
1111 | 2012 -1, // never trigger processing |
2013 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2014 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2015 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2016 } |
1705 | 2017 |
2018 // Initialize the card queue set used to hold cards containing | |
2019 // references into the collection set. | |
2020 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, | |
2021 DirtyCardQ_FL_lock, | |
2022 -1, // never trigger processing | |
2023 -1, // no limit on length | |
2024 Shared_DirtyCardQ_lock, | |
2025 &JavaThread::dirty_card_queue_set()); | |
2026 | |
342 | 2027 // In case we're keeping closure specialization stats, initialize those |
2028 // counts and that mechanism. | |
2029 SpecializationStats::clear(); | |
2030 | |
2031 _gc_alloc_region_list = NULL; | |
2032 | |
2033 // Do later initialization work for concurrent refinement. | |
2034 _cg1r->init(); | |
2035 | |
2036 return JNI_OK; | |
2037 } | |
2038 | |
2039 void G1CollectedHeap::ref_processing_init() { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2040 // Reference processing in G1 currently works as follows: |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2041 // |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2042 // * There is only one reference processor instance that |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2043 // 'spans' the entire heap. It is created by the code |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2044 // below. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2045 // * Reference discovery is not enabled during an incremental |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2046 // pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2047 // * Discoverered refs are not enqueued nor are they processed |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2048 // during an incremental pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2049 // * Reference discovery is enabled at initial marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2050 // * Reference discovery is disabled and the discovered |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2051 // references processed etc during remarking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2052 // * Reference discovery is MT (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2053 // * Reference discovery requires a barrier (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2054 // * Reference processing is currently not MT (see 6608385). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2055 // * A full GC enables (non-MT) reference discovery and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2056 // processes any discovered references. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2057 |
342 | 2058 SharedHeap::ref_processing_init(); |
2059 MemRegion mr = reserved_region(); | |
2060 _ref_processor = ReferenceProcessor::create_ref_processor( | |
2061 mr, // span | |
2062 false, // Reference discovery is not atomic | |
2063 true, // mt_discovery | |
2064 NULL, // is alive closure: need to fill this in for efficiency | |
2065 ParallelGCThreads, | |
2066 ParallelRefProcEnabled, | |
2067 true); // Setting next fields of discovered | |
2068 // lists requires a barrier. | |
2069 } | |
2070 | |
2071 size_t G1CollectedHeap::capacity() const { | |
2072 return _g1_committed.byte_size(); | |
2073 } | |
2074 | |
1705 | 2075 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, |
2076 DirtyCardQueue* into_cset_dcq, | |
2077 bool concurrent, | |
342 | 2078 int worker_i) { |
889 | 2079 // Clean cards in the hot card cache |
1705 | 2080 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); |
889 | 2081 |
342 | 2082 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
2083 int n_completed_buffers = 0; | |
1705 | 2084 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { |
342 | 2085 n_completed_buffers++; |
2086 } | |
2087 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
2088 (double) n_completed_buffers); | |
2089 dcqs.clear_n_completed_buffers(); | |
2090 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
2091 } | |
2092 | |
2093 | |
2094 // Computes the sum of the storage used by the various regions. | |
2095 | |
2096 size_t G1CollectedHeap::used() const { | |
862
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2097 assert(Heap_lock->owner() != NULL, |
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2098 "Should be owned on this thread's behalf."); |
342 | 2099 size_t result = _summary_bytes_used; |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2100 // Read only once in case it is set to NULL concurrently |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2101 HeapRegion* hr = _cur_alloc_region; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2102 if (hr != NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2103 result += hr->used(); |
342 | 2104 return result; |
2105 } | |
2106 | |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2107 size_t G1CollectedHeap::used_unlocked() const { |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2108 size_t result = _summary_bytes_used; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2109 return result; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2110 } |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2111 |
342 | 2112 class SumUsedClosure: public HeapRegionClosure { |
2113 size_t _used; | |
2114 public: | |
2115 SumUsedClosure() : _used(0) {} | |
2116 bool doHeapRegion(HeapRegion* r) { | |
2117 if (!r->continuesHumongous()) { | |
2118 _used += r->used(); | |
2119 } | |
2120 return false; | |
2121 } | |
2122 size_t result() { return _used; } | |
2123 }; | |
2124 | |
2125 size_t G1CollectedHeap::recalculate_used() const { | |
2126 SumUsedClosure blk; | |
2127 _hrs->iterate(&blk); | |
2128 return blk.result(); | |
2129 } | |
2130 | |
2131 #ifndef PRODUCT | |
2132 class SumUsedRegionsClosure: public HeapRegionClosure { | |
2133 size_t _num; | |
2134 public: | |
677 | 2135 SumUsedRegionsClosure() : _num(0) {} |
342 | 2136 bool doHeapRegion(HeapRegion* r) { |
2137 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
2138 _num += 1; | |
2139 } | |
2140 return false; | |
2141 } | |
2142 size_t result() { return _num; } | |
2143 }; | |
2144 | |
2145 size_t G1CollectedHeap::recalculate_used_regions() const { | |
2146 SumUsedRegionsClosure blk; | |
2147 _hrs->iterate(&blk); | |
2148 return blk.result(); | |
2149 } | |
2150 #endif // PRODUCT | |
2151 | |
2152 size_t G1CollectedHeap::unsafe_max_alloc() { | |
2153 if (_free_regions > 0) return HeapRegion::GrainBytes; | |
2154 // otherwise, is there space in the current allocation region? | |
2155 | |
2156 // We need to store the current allocation region in a local variable | |
2157 // here. The problem is that this method doesn't take any locks and | |
2158 // there may be other threads which overwrite the current allocation | |
2159 // region field. attempt_allocation(), for example, sets it to NULL | |
2160 // and this can happen *after* the NULL check here but before the call | |
2161 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
2162 // to be a problem in the optimized build, since the two loads of the | |
2163 // current allocation region field are optimized away. | |
2164 HeapRegion* car = _cur_alloc_region; | |
2165 | |
2166 // FIXME: should iterate over all regions? | |
2167 if (car == NULL) { | |
2168 return 0; | |
2169 } | |
2170 return car->free(); | |
2171 } | |
2172 | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2173 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2174 return |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2175 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2176 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2177 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2178 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2179 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) { |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2180 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2181 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2182 // We assume that if concurrent == true, then the caller is a |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2183 // concurrent thread that was joined the Suspendible Thread |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2184 // Set. If there's ever a cheap way to check this, we should add an |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2185 // assert here. |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2186 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2187 // We have already incremented _total_full_collections at the start |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2188 // of the GC, so total_full_collections() represents how many full |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2189 // collections have been started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2190 unsigned int full_collections_started = total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2191 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2192 // Given that this method is called at the end of a Full GC or of a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2193 // concurrent cycle, and those can be nested (i.e., a Full GC can |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2194 // interrupt a concurrent cycle), the number of full collections |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2195 // completed should be either one (in the case where there was no |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2196 // nesting) or two (when a Full GC interrupted a concurrent cycle) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2197 // behind the number of full collections started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2198 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2199 // This is the case for the inner caller, i.e. a Full GC. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2200 assert(concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2201 (full_collections_started == _full_collections_completed + 1) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2202 (full_collections_started == _full_collections_completed + 2), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2203 err_msg("for inner caller (Full GC): full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2204 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2205 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2206 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2207 // This is the case for the outer caller, i.e. the concurrent cycle. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2208 assert(!concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2209 (full_collections_started == _full_collections_completed + 1), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2210 err_msg("for outer caller (concurrent cycle): " |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2211 "full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2212 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2213 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2214 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2215 _full_collections_completed += 1; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2216 |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2217 // We need to clear the "in_progress" flag in the CM thread before |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2218 // we wake up any waiters (especially when ExplicitInvokesConcurrent |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2219 // is set) so that if a waiter requests another System.gc() it doesn't |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2220 // incorrectly see that a marking cyle is still in progress. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2221 if (concurrent) { |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2222 _cmThread->clear_in_progress(); |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2223 } |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2224 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2225 // This notify_all() will ensure that a thread that called |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2226 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2227 // and it's waiting for a full GC to finish will be woken up. It is |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2228 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2229 FullGCCount_lock->notify_all(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2230 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2231 |
342 | 2232 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
2233 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
2234 assert(Heap_lock->is_locked(), "Precondition#2"); | |
2235 GCCauseSetter gcs(this, cause); | |
2236 switch (cause) { | |
2237 case GCCause::_heap_inspection: | |
2238 case GCCause::_heap_dump: { | |
2239 HandleMark hm; | |
2240 do_full_collection(false); // don't clear all soft refs | |
2241 break; | |
2242 } | |
2243 default: // XXX FIX ME | |
2244 ShouldNotReachHere(); // Unexpected use of this function | |
2245 } | |
2246 } | |
2247 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2248 void G1CollectedHeap::collect(GCCause::Cause cause) { |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2249 // The caller doesn't have the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2250 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2251 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2252 unsigned int gc_count_before; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2253 unsigned int full_gc_count_before; |
342 | 2254 { |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2255 MutexLocker ml(Heap_lock); |
1973 | 2256 |
2257 // Don't want to do a GC until cleanup is completed. This | |
2258 // limitation will be removed in the near future when the | |
2259 // operation of the free region list is revamped as part of | |
2260 // CR 6977804. | |
2261 wait_for_cleanup_complete(); | |
2262 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2263 // Read the GC count while holding the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2264 gc_count_before = SharedHeap::heap()->total_collections(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2265 full_gc_count_before = SharedHeap::heap()->total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2266 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2267 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2268 if (should_do_concurrent_full_gc(cause)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2269 // Schedule an initial-mark evacuation pause that will start a |
1973 | 2270 // concurrent cycle. We're setting word_size to 0 which means that |
2271 // we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2272 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2273 0, /* word_size */ |
2274 true, /* should_initiate_conc_mark */ | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2275 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2276 cause); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2277 VMThread::execute(&op); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2278 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2279 if (cause == GCCause::_gc_locker |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2280 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2281 |
1973 | 2282 // Schedule a standard evacuation pause. We're setting word_size |
2283 // to 0 which means that we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2284 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2285 0, /* word_size */ |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2286 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2287 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2288 cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2289 VMThread::execute(&op); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2290 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2291 // Schedule a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2292 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2293 VMThread::execute(&op); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2294 } |
342 | 2295 } |
2296 } | |
2297 | |
2298 bool G1CollectedHeap::is_in(const void* p) const { | |
2299 if (_g1_committed.contains(p)) { | |
2300 HeapRegion* hr = _hrs->addr_to_region(p); | |
2301 return hr->is_in(p); | |
2302 } else { | |
2303 return _perm_gen->as_gen()->is_in(p); | |
2304 } | |
2305 } | |
2306 | |
2307 // Iteration functions. | |
2308 | |
2309 // Iterates an OopClosure over all ref-containing fields of objects | |
2310 // within a HeapRegion. | |
2311 | |
2312 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
2313 MemRegion _mr; | |
2314 OopClosure* _cl; | |
2315 public: | |
2316 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
2317 : _mr(mr), _cl(cl) {} | |
2318 bool doHeapRegion(HeapRegion* r) { | |
2319 if (! r->continuesHumongous()) { | |
2320 r->oop_iterate(_cl); | |
2321 } | |
2322 return false; | |
2323 } | |
2324 }; | |
2325 | |
678 | 2326 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 2327 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
2328 _hrs->iterate(&blk); | |
678 | 2329 if (do_perm) { |
2330 perm_gen()->oop_iterate(cl); | |
2331 } | |
342 | 2332 } |
2333 | |
678 | 2334 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 2335 IterateOopClosureRegionClosure blk(mr, cl); |
2336 _hrs->iterate(&blk); | |
678 | 2337 if (do_perm) { |
2338 perm_gen()->oop_iterate(cl); | |
2339 } | |
342 | 2340 } |
2341 | |
2342 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
2343 | |
2344 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
2345 ObjectClosure* _cl; | |
2346 public: | |
2347 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
2348 bool doHeapRegion(HeapRegion* r) { | |
2349 if (! r->continuesHumongous()) { | |
2350 r->object_iterate(_cl); | |
2351 } | |
2352 return false; | |
2353 } | |
2354 }; | |
2355 | |
678 | 2356 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 2357 IterateObjectClosureRegionClosure blk(cl); |
2358 _hrs->iterate(&blk); | |
678 | 2359 if (do_perm) { |
2360 perm_gen()->object_iterate(cl); | |
2361 } | |
342 | 2362 } |
2363 | |
2364 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
2365 // FIXME: is this right? | |
2366 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
2367 } | |
2368 | |
2369 // Calls a SpaceClosure on a HeapRegion. | |
2370 | |
2371 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
2372 SpaceClosure* _cl; | |
2373 public: | |
2374 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
2375 bool doHeapRegion(HeapRegion* r) { | |
2376 _cl->do_space(r); | |
2377 return false; | |
2378 } | |
2379 }; | |
2380 | |
2381 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
2382 SpaceClosureRegionClosure blk(cl); | |
2383 _hrs->iterate(&blk); | |
2384 } | |
2385 | |
2386 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { | |
2387 _hrs->iterate(cl); | |
2388 } | |
2389 | |
2390 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
2391 HeapRegionClosure* cl) { | |
2392 _hrs->iterate_from(r, cl); | |
2393 } | |
2394 | |
2395 void | |
2396 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { | |
2397 _hrs->iterate_from(idx, cl); | |
2398 } | |
2399 | |
2400 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } | |
2401 | |
// Parallel "chunked" iteration over all heap regions. Every worker
// scans all regions but starts at a different offset so the workers
// spread out; a region is processed only by the worker that
// successfully claims it (CAS on the claim value via claimHeapRegion),
// so each region is visited exactly once overall.
void
G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                 int worker,
                                                 jint claim_value) {
  const size_t regions = n_regions();
  const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
  // Spread out the starting points of the workers across the heap.
  const size_t start_index = regions / worker_num * (size_t) worker;

  // Each worker actually looks at all regions, wrapping around.
  for (size_t count = 0; count < regions; ++count) {
    const size_t index = (start_index + count) % regions;
    // NOTE(review): index is a size_t, so "0 <= index" is vacuously true.
    assert(0 <= index && index < regions, "sanity");
    HeapRegion* r = region_at(index);
    // Ignore "continues humongous" regions (they are processed when we
    // come across their corresponding "starts humongous" region, below)
    // and regions that are already claimed.
    if (r->claim_value() == claim_value || r->continuesHumongous()) {
      continue;
    }
    // OK, try to claim it.
    if (r->claimHeapRegion(claim_value)) {
      // success!
      assert(!r->continuesHumongous(), "sanity");
      if (r->startsHumongous()) {
        // If the region is "starts humongous" we iterate over its
        // "continues humongous" regions first; the order is important.
        // In one case, calling the closure on the "starts humongous"
        // region might de-allocate and clear all its "continues
        // humongous" regions and, as a result, we might end up
        // processing them twice. So we do them first (notice: most
        // closures will ignore them anyway) and then we do the
        // "starts humongous" region.
        for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
          HeapRegion* chr = region_at(ch_index);

          // If the region has already been claimed, or it's not
          // "continues humongous", we're done with this humongous set.
          if (chr->claim_value() == claim_value ||
              !chr->continuesHumongous()) {
            break;
          }

          // No one should have claimed it directly. We can assert this
          // given that we claimed its "starts humongous" region.
          assert(chr->claim_value() != claim_value, "sanity");
          assert(chr->humongous_start_region() == r, "sanity");

          if (chr->claimHeapRegion(claim_value)) {
            // We should always be able to claim it; no one else should
            // be trying to claim this region.

            bool res2 = cl->doHeapRegion(chr);
            assert(!res2, "Should not abort");

            // Right now, this holds (i.e., no closure that actually
            // does something with "continues humongous" regions
            // clears them). We might have to weaken it in the future,
            // but let's leave these two asserts here for extra safety.
            assert(chr->continuesHumongous(), "should still be the case");
            assert(chr->humongous_start_region() == r, "sanity");
          } else {
            guarantee(false, "we should not reach here");
          }
        }
      }

      assert(!r->continuesHumongous(), "sanity");
      bool res = cl->doHeapRegion(r);
      assert(!res, "Should not abort");
    }
  }
}
2475 | |
390 | 2476 class ResetClaimValuesClosure: public HeapRegionClosure { |
2477 public: | |
2478 bool doHeapRegion(HeapRegion* r) { | |
2479 r->set_claim_value(HeapRegion::InitialClaimValue); | |
2480 return false; | |
2481 } | |
2482 }; | |
2483 | |
2484 void | |
2485 G1CollectedHeap::reset_heap_region_claim_values() { | |
2486 ResetClaimValuesClosure blk; | |
2487 heap_region_iterate(&blk); | |
2488 } | |
2489 | |
#ifdef ASSERT
// Debug-only check that every region in the heap has the expected
// claim value. Piggy-backed on the same walk is a check that the
// humongous_start_region() information on "continues humongous"
// regions points at the most recently seen "starts humongous" region.

class CheckClaimValuesClosure : public HeapRegionClosure {
private:
  jint _claim_value;      // the claim value every region should have
  size_t _failures;       // number of mismatches found
  HeapRegion* _sh_region; // last "starts humongous" region seen
public:
  CheckClaimValuesClosure(jint claim_value) :
    _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->claim_value() != _claim_value) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                             "claim value = %d, should be %d",
                             r->bottom(), r->end(), r->claim_value(),
                             _claim_value);
      ++_failures;
    }
    if (!r->isHumongous()) {
      _sh_region = NULL;
    } else if (r->startsHumongous()) {
      _sh_region = r;
    } else if (r->continuesHumongous()) {
      // A "continues humongous" region must name the "starts
      // humongous" region that immediately precedes its run.
      if (r->humongous_start_region() != _sh_region) {
        gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                               "HS = "PTR_FORMAT", should be "PTR_FORMAT,
                               r->bottom(), r->end(),
                               r->humongous_start_region(),
                               _sh_region);
        ++_failures;
      }
    }
    return false; // never abort: report every failure
  }
  size_t failures() {
    return _failures;
  }
};

// Returns true iff every region carries the given claim value.
bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  CheckClaimValuesClosure cl(claim_value);
  heap_region_iterate(&cl);
  return cl.failures() == 0;
}
#endif // ASSERT
342 | 2539 |
2540 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2541 HeapRegion* r = g1_policy()->collection_set(); | |
2542 while (r != NULL) { | |
2543 HeapRegion* next = r->next_in_collection_set(); | |
2544 if (cl->doHeapRegion(r)) { | |
2545 cl->incomplete(); | |
2546 return; | |
2547 } | |
2548 r = next; | |
2549 } | |
2550 } | |
2551 | |
// Walk the collection set starting at region r, wrapping around to the
// head of the CSet list once the end is reached, so every CSet region
// is visited exactly once.
void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                  HeapRegionClosure *cl) {
  if (r == NULL) {
    // The CSet is empty so there's nothing to do.
    return;
  }

  assert(r->in_collection_set(),
         "Start region must be a member of the collection set.");
  HeapRegion* cur = r;
  // First leg: from r to the end of the CSet list.
  while (cur != NULL) {
    // Grab the link first; the closure may modify the region.
    HeapRegion* next = cur->next_in_collection_set();
    // NOTE(review): the "&& false" makes the early-abort branch dead
    // code, i.e. the closure's return value is effectively ignored here
    // (unlike in collection_set_iterate above). This looks deliberate
    // but is worth confirming.
    if (cl->doHeapRegion(cur) && false) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
  // Second leg: wrap around from the head of the CSet back up to r.
  cur = g1_policy()->collection_set();
  while (cur != r) {
    HeapRegion* next = cur->next_in_collection_set();
    if (cl->doHeapRegion(cur) && false) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
}
2580 | |
2581 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
2582 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
2583 } | |
2584 | |
2585 | |
2586 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2587 Space* res = heap_region_containing(addr); | |
2588 if (res == NULL) | |
2589 res = perm_gen()->space_containing(addr); | |
2590 return res; | |
2591 } | |
2592 | |
2593 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2594 Space* sp = space_containing(addr); | |
2595 if (sp != NULL) { | |
2596 return sp->block_start(addr); | |
2597 } | |
2598 return NULL; | |
2599 } | |
2600 | |
2601 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
2602 Space* sp = space_containing(addr); | |
2603 assert(sp != NULL, "block_size of address outside of heap"); | |
2604 return sp->block_size(addr); | |
2605 } | |
2606 | |
2607 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2608 Space* sp = space_containing(addr); | |
2609 return sp->block_is_obj(addr); | |
2610 } | |
2611 | |
2612 bool G1CollectedHeap::supports_tlab_allocation() const { | |
2613 return true; | |
2614 } | |
2615 | |
2616 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
2617 return HeapRegion::GrainBytes; | |
2618 } | |
2619 | |
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  // Return the remaining space in the cur alloc region, but not less
  // than the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  // We need to store the cur alloc region locally, since it might change
  // between when we test for NULL and when we use it later.
  ContiguousSpace* cur_alloc_space = _cur_alloc_region;
  size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;

  if (cur_alloc_space == NULL) {
    // No current alloc region: just report the upper bound.
    return max_tlab_size;
  } else {
    // Clamp the region's free space into [MinTLABSize, max_tlab_size].
    return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
                max_tlab_size);
  }
}
2640 | |
2641 bool G1CollectedHeap::allocs_are_zero_filled() { | |
2642 return false; | |
2643 } | |
2644 | |
2645 size_t G1CollectedHeap::large_typearray_limit() { | |
2646 // FIXME | |
2647 return HeapRegion::GrainBytes/HeapWordSize; | |
2648 } | |
2649 | |
2650 size_t G1CollectedHeap::max_capacity() const { | |
1092
ed52bcc32739
6880903: G1: G1 reports incorrect Runtime.maxMemory()
tonyp
parents:
1089
diff
changeset
|
2651 return g1_reserved_obj_bytes(); |
342 | 2652 } |
2653 | |
2654 jlong G1CollectedHeap::millis_since_last_gc() { | |
2655 // assert(false, "NYI"); | |
2656 return 0; | |
2657 } | |
2658 | |
2659 | |
2660 void G1CollectedHeap::prepare_for_verify() { | |
2661 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2662 ensure_parsability(false); | |
2663 } | |
2664 g1_rem_set()->prepare_for_verify(); | |
2665 } | |
2666 | |
2667 class VerifyLivenessOopClosure: public OopClosure { | |
2668 G1CollectedHeap* g1h; | |
2669 public: | |
2670 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
2671 g1h = _g1h; | |
2672 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2673 void do_oop(narrowOop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2674 void do_oop( oop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2675 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2676 template <class T> void do_oop_work(T *p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2677 oop obj = oopDesc::load_decode_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2678 guarantee(obj == NULL || !g1h->is_obj_dead(obj), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2679 "Dead object referenced by a not dead object"); |
342 | 2680 } |
2681 }; | |
2682 | |
// Verifies the objects in one region: each live object's reference
// fields must point to live objects, and the bytes of live data are
// accumulated so the caller can compare against the region's
// max_live_bytes().
class VerifyObjsInRegionClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  size_t _live_bytes;       // accumulated live data, in bytes
  HeapRegion *_hr;
  bool _use_prev_marking;
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
    : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
    _g1h = G1CollectedHeap::heap();
  }
  void do_object(oop o) {
    VerifyLivenessOopClosure isLive(_g1h);
    assert(o != NULL, "Huh?");
    if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
      // The object is live: check all of its reference fields.
      o->oop_iterate(&isLive);
      // Only objects allocated before the previous marking are counted
      // towards the live-bytes total — presumably to match how
      // max_live_bytes is computed; confirm against HeapRegion.
      if (!_hr->obj_allocated_since_prev_marking(o)) {
        size_t obj_size = o->size();  // Make sure we don't overflow
        _live_bytes += (obj_size * HeapWordSize);
      }
    }
  }
  size_t live_bytes() { return _live_bytes; }
};
2709 | |
// Debugging aid: prints every object in a region with its prev/next
// mark status and allocated-since-prev-marking flag, followed by a raw
// word-by-word dump of the object's contents.
class PrintObjsInRegionClosure : public ObjectClosure {
  HeapRegion *_hr;
  G1CollectedHeap *_g1;
public:
  PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
    _g1 = G1CollectedHeap::heap();
  };

  void do_object(oop o) {
    if (o != NULL) {
      HeapWord *start = (HeapWord *) o;
      size_t word_sz = o->size();
      gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
                          " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
                          (void*) o, word_sz,
                          _g1->isMarkedPrev(o),
                          _g1->isMarkedNext(o),
                          _hr->obj_allocated_since_prev_marking(o));
      HeapWord *end = start + word_sz;
      HeapWord *cur;
      int *val;
      // Dump the raw contents of the object, one word per line.
      for (cur = start; cur < end; cur++) {
        val = (int *) cur;
        gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
      }
    }
  }
};
2738 | |
// Per-region verification closure: verifies the region itself, then
// (if that succeeds) verifies its objects and cross-checks the
// computed live-byte count against the region's max_live_bytes().
// Failures are latched into _failures rather than aborting, so every
// failing region is reported.
class VerifyRegionClosure: public HeapRegionClosure {
private:
  bool _allow_dirty;
  bool _par;              // true when used from the parallel verify task
  bool _use_prev_marking;
  bool _failures;         // latched: true once any region fails
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
    : _allow_dirty(allow_dirty),
      _par(par),
      _use_prev_marking(use_prev_marking),
      _failures(false) {}

  bool failures() {
    return _failures;
  }

  bool doHeapRegion(HeapRegion* r) {
    // In the serial case all regions should still be unclaimed.
    guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
              "Should be unclaimed at verify points.");
    if (!r->continuesHumongous()) {
      bool failures = false;
      r->verify(_allow_dirty, _use_prev_marking, &failures);
      if (failures) {
        _failures = true;
      } else {
        // Region-level checks passed: verify the objects and the
        // live-byte accounting.
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
        r->object_iterate(&not_dead_yet_cl);
        if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
          gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
                                 "max_live_bytes "SIZE_FORMAT" "
                                 "< calculated "SIZE_FORMAT,
                                 r->bottom(), r->end(),
                                 r->max_live_bytes(),
                                 not_dead_yet_cl.live_bytes());
          _failures = true;
        }
      }
    }
    // Returning false means "do not abort the iteration": we keep
    // going so that every region is verified and all failures are
    // reported.
    return false;
  }
};
2783 | |
// Root verification closure: every non-NULL root must point to a live
// object. Failures are latched and the offending object is printed.
class VerifyRootsClosure: public OopsInGenClosure {
private:
  G1CollectedHeap* _g1h;
  bool _use_prev_marking;
  bool _failures;          // latched: true once any bad root is seen
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyRootsClosure(bool use_prev_marking) :
    _g1h(G1CollectedHeap::heap()),
    _use_prev_marking(use_prev_marking),
    _failures(false) { }

  bool failures() { return _failures; }

  template <class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
        gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
        obj->print_on(gclog_or_tty);
        _failures = true;
      }
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};
2815 | |
// This is the task used for parallel heap verification. Each worker
// runs a VerifyRegionClosure over a claimed subset of the regions
// (via heap_region_par_iterate_chunked with ParVerifyClaimValue) and
// latches any failure into the task.

class G1ParVerifyTask: public AbstractGangTask {
private:
  G1CollectedHeap* _g1h;
  bool _allow_dirty;
  bool _use_prev_marking;
  bool _failures;          // latched across all workers

public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
                  bool use_prev_marking) :
    AbstractGangTask("Parallel verify task"),
    _g1h(g1h),
    _allow_dirty(allow_dirty),
    _use_prev_marking(use_prev_marking),
    _failures(false) { }

  bool failures() {
    return _failures;
  }

  void work(int worker_i) {
    HandleMark hm;
    VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
    _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
                                          HeapRegion::ParVerifyClaimValue);
    if (blk.failures()) {
      _failures = true;
    }
  }
};
2850 | |
342 | 2851 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
811 | 2852 verify(allow_dirty, silent, /* use_prev_marking */ true); |
2853 } | |
2854 | |
2855 void G1CollectedHeap::verify(bool allow_dirty, | |
2856 bool silent, | |
2857 bool use_prev_marking) { | |
342 | 2858 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
2859 if (!silent) { gclog_or_tty->print("roots "); } | |
811 | 2860 VerifyRootsClosure rootsCl(use_prev_marking); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2861 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2862 process_strong_roots(true, // activate StrongRootsScope |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2863 false, |
342 | 2864 SharedHeap::SO_AllClasses, |
2865 &rootsCl, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2866 &blobsCl, |
342 | 2867 &rootsCl); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2868 bool failures = rootsCl.failures(); |
342 | 2869 rem_set()->invalidate(perm_gen()->used_region(), false); |
2870 if (!silent) { gclog_or_tty->print("heapRegions "); } | |
390 | 2871 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2872 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2873 "sanity check"); | |
2874 | |
811 | 2875 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
390 | 2876 int n_workers = workers()->total_workers(); |
2877 set_par_threads(n_workers); | |
2878 workers()->run_task(&task); | |
2879 set_par_threads(0); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2880 if (task.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2881 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2882 } |
390 | 2883 |
2884 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2885 "sanity check"); | |
2886 | |
2887 reset_heap_region_claim_values(); | |
2888 | |
2889 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2890 "sanity check"); | |
2891 } else { | |
811 | 2892 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
390 | 2893 _hrs->iterate(&blk); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2894 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2895 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2896 } |
390 | 2897 } |
342 | 2898 if (!silent) gclog_or_tty->print("remset "); |
2899 rem_set()->verify(); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2900 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2901 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2902 gclog_or_tty->print_cr("Heap:"); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2903 print_on(gclog_or_tty, true /* extended */); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2904 gclog_or_tty->print_cr(""); |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2905 #ifndef PRODUCT |
1044 | 2906 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
1388 | 2907 concurrent_mark()->print_reachable("at-verification-failure", |
2908 use_prev_marking, false /* all */); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2909 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2910 #endif |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2911 gclog_or_tty->flush(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2912 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2913 guarantee(!failures, "there should not have been any failures"); |
342 | 2914 } else { |
2915 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2916 } | |
2917 } | |
2918 | |
2919 class PrintRegionClosure: public HeapRegionClosure { | |
2920 outputStream* _st; | |
2921 public: | |
2922 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2923 bool doHeapRegion(HeapRegion* r) { | |
2924 r->print_on(_st); | |
2925 return false; | |
2926 } | |
2927 }; | |
2928 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2929 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 2930 |
2931 void G1CollectedHeap::print_on(outputStream* st) const { | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2932 print_on(st, PrintHeapAtGCExtended); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2933 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2934 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2935 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2936 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2937 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2938 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2939 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2940 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2941 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2942 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2943 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2944 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2945 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2946 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2947 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2948 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2949 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2950 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2951 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2952 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2953 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2954 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2955 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2956 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2957 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2958 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2959 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2960 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 2961 PrintRegionClosure blk(st); |
2962 _hrs->iterate(&blk); | |
2963 } | |
2964 | |
2965 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2966 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1019 | 2967 workers()->print_worker_threads_on(st); |
2968 } | |
2969 | |
2970 _cmThread->print_on(st); | |
342 | 2971 st->cr(); |
1019 | 2972 |
2973 _cm->print_worker_threads_on(st); | |
2974 | |
2975 _cg1r->print_worker_threads_on(st); | |
2976 | |
342 | 2977 _czft->print_on(st); |
2978 st->cr(); | |
2979 } | |
2980 | |
2981 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2982 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 2983 workers()->threads_do(tc); |
2984 } | |
2985 tc->do_thread(_cmThread); | |
794 | 2986 _cg1r->threads_do(tc); |
342 | 2987 tc->do_thread(_czft); |
2988 } | |
2989 | |
2990 void G1CollectedHeap::print_tracing_info() const { | |
2991 // We'll overload this to mean "trace GC pause statistics." | |
2992 if (TraceGen0Time || TraceGen1Time) { | |
2993 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2994 // to that. | |
2995 g1_policy()->print_tracing_info(); | |
2996 } | |
751 | 2997 if (G1SummarizeRSetStats) { |
342 | 2998 g1_rem_set()->print_summary_info(); |
2999 } | |
1282 | 3000 if (G1SummarizeConcMark) { |
342 | 3001 concurrent_mark()->print_summary_info(); |
3002 } | |
751 | 3003 if (G1SummarizeZFStats) { |
342 | 3004 ConcurrentZFThread::print_summary_info(); |
3005 } | |
3006 g1_policy()->print_yg_surv_rate_info(); | |
3007 | |
3008 SpecializationStats::print(); | |
3009 } | |
3010 | |
3011 | |
3012 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
3013 HeapRegion* hr = heap_region_containing(addr); | |
3014 if (hr == NULL) { | |
3015 return 0; | |
3016 } else { | |
3017 return 1; | |
3018 } | |
3019 } | |
3020 | |
3021 G1CollectedHeap* G1CollectedHeap::heap() { | |
3022 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
3023 "not a garbage-first heap"); | |
3024 return _g1h; | |
3025 } | |
3026 | |
3027 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
3028 // always_do_update_barrier = false; |
342 | 3029 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
3030 // Call allocation profiler | |
3031 AllocationProfiler::iterate_since_last_gc(); | |
3032 // Fill TLAB's and such | |
3033 ensure_parsability(true); | |
3034 } | |
3035 | |
3036 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
3037 // FIXME: what is this about? | |
3038 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
3039 // is set. | |
3040 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
3041 "derived pointer present")); | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
3042 // always_do_update_barrier = true; |
342 | 3043 } |
3044 | |
1973 | 3045 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, |
3046 unsigned int gc_count_before, | |
3047 bool* succeeded) { | |
3048 assert_heap_not_locked_and_not_at_safepoint(); | |
342 | 3049 g1_policy()->record_stop_world_start(); |
1973 | 3050 VM_G1IncCollectionPause op(gc_count_before, |
3051 word_size, | |
3052 false, /* should_initiate_conc_mark */ | |
3053 g1_policy()->max_pause_time_ms(), | |
3054 GCCause::_g1_inc_collection_pause); | |
3055 VMThread::execute(&op); | |
3056 | |
3057 HeapWord* result = op.result(); | |
3058 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded(); | |
3059 assert(result == NULL || ret_succeeded, | |
3060 "the result should be NULL if the VM did not succeed"); | |
3061 *succeeded = ret_succeeded; | |
3062 | |
3063 assert_heap_not_locked(); | |
3064 return result; | |
342 | 3065 } |
3066 | |
3067 void | |
3068 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3069 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3070 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3071 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3072 CGC_lock->notify(); |
342 | 3073 } |
3074 } | |
3075 | |
3076 class VerifyMarkedObjsClosure: public ObjectClosure { | |
3077 G1CollectedHeap* _g1h; | |
3078 public: | |
3079 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
3080 void do_object(oop obj) { | |
3081 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
3082 "markandsweep mark should agree with concurrent deadness"); | |
3083 } | |
3084 }; | |
3085 | |
3086 void | |
3087 G1CollectedHeap::checkConcurrentMark() { | |
3088 VerifyMarkedObjsClosure verifycl(this); | |
3089 // MutexLockerEx x(getMarkBitMapLock(), | |
3090 // Mutex::_no_safepoint_check_flag); | |
678 | 3091 object_iterate(&verifycl, false); |
342 | 3092 } |
3093 | |
3094 void G1CollectedHeap::do_sync_mark() { | |
3095 _cm->checkpointRootsInitial(); | |
3096 _cm->markFromRoots(); | |
3097 _cm->checkpointRootsFinal(false); | |
3098 } | |
3099 | |
3100 // <NEW PREDICTION> | |
3101 | |
3102 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
3103 bool young) { | |
3104 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
3105 } | |
3106 | |
3107 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
3108 predicted_time_ms) { | |
3109 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
3110 } | |
3111 | |
3112 size_t G1CollectedHeap::pending_card_num() { | |
3113 size_t extra_cards = 0; | |
3114 JavaThread *curr = Threads::first(); | |
3115 while (curr != NULL) { | |
3116 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
3117 extra_cards += dcq.size(); | |
3118 curr = curr->next(); | |
3119 } | |
3120 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3121 size_t buffer_size = dcqs.buffer_size(); | |
3122 size_t buffer_num = dcqs.completed_buffers_num(); | |
3123 return buffer_size * buffer_num + extra_cards; | |
3124 } | |
3125 | |
3126 size_t G1CollectedHeap::max_pending_card_num() { | |
3127 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3128 size_t buffer_size = dcqs.buffer_size(); | |
3129 size_t buffer_num = dcqs.completed_buffers_num(); | |
3130 int thread_num = Threads::number_of_threads(); | |
3131 return (buffer_num + thread_num) * buffer_size; | |
3132 } | |
3133 | |
3134 size_t G1CollectedHeap::cards_scanned() { | |
1861 | 3135 return g1_rem_set()->cardsScanned(); |
342 | 3136 } |
3137 | |
3138 void | |
3139 G1CollectedHeap::setup_surviving_young_words() { | |
3140 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
3141 size_t array_length = g1_policy()->young_cset_length(); | |
3142 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3143 if (_surviving_young_words == NULL) { | |
3144 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
3145 "Not enough space for young surv words summary."); | |
3146 } | |
3147 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3148 #ifdef ASSERT |
342 | 3149 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3150 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3151 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3152 #endif // !ASSERT |
342 | 3153 } |
3154 | |
3155 void | |
3156 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
3157 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3158 size_t array_length = g1_policy()->young_cset_length(); | |
3159 for (size_t i = 0; i < array_length; ++i) | |
3160 _surviving_young_words[i] += surv_young_words[i]; | |
3161 } | |
3162 | |
3163 void | |
3164 G1CollectedHeap::cleanup_surviving_young_words() { | |
3165 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
3166 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
3167 _surviving_young_words = NULL; | |
3168 } | |
3169 | |
3170 // </NEW PREDICTION> | |
3171 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3172 struct PrepareForRSScanningClosure : public HeapRegionClosure { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3173 bool doHeapRegion(HeapRegion *r) { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3174 r->rem_set()->set_iter_claimed(0); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3175 return false; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3176 } |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3177 }; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3178 |
1709 | 3179 #if TASKQUEUE_STATS |
3180 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { | |
3181 st->print_raw_cr("GC Task Stats"); | |
3182 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); | |
3183 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); | |
3184 } | |
3185 | |
3186 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { | |
3187 print_taskqueue_stats_hdr(st); | |
3188 | |
3189 TaskQueueStats totals; | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3190 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3191 for (int i = 0; i < n; ++i) { |
3192 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); | |
3193 totals += task_queue(i)->stats; | |
3194 } | |
3195 st->print_raw("tot "); totals.print(st); st->cr(); | |
3196 | |
3197 DEBUG_ONLY(totals.verify()); | |
3198 } | |
3199 | |
3200 void G1CollectedHeap::reset_taskqueue_stats() { | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3201 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3202 for (int i = 0; i < n; ++i) { |
3203 task_queue(i)->stats.reset(); | |
3204 } | |
3205 } | |
3206 #endif // TASKQUEUE_STATS | |
3207 | |
1973 | 3208 bool |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3209 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3210 if (GC_locker::check_active_before_gc()) { |
1973 | 3211 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3212 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3213 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3214 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3215 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3216 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3217 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3218 { |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3219 ResourceMark rm; |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3220 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3221 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3222 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3223 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3224 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3225 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3226 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3227 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3228 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3229 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3230 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3231 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3232 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3233 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3234 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3235 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3236 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3237 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3238 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3239 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3240 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3241 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3242 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3243 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3244 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3245 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3246 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3247 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3248 TraceMemoryManagerStats tms(false /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3249 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3250 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3251 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3252 guarantee(!is_gc_active(), "collection is not reentrant"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3253 assert(regions_accounted_for(), "Region leakage!"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3254 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3255 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3256 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3257 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3258 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3259 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3260 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3261 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3262 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3263 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3264 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3265 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3266 increment_total_collections(false /* full gc */); |
342 | 3267 |
3268 #if G1_REM_SET_LOGGING | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3269 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3270 print(); |
342 | 3271 #endif |
3272 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3273 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3274 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3275 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3276 gclog_or_tty->print(" VerifyBeforeGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3277 Universe::verify(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3278 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3279 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3280 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3281 |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3282 // Please see comment in G1CollectedHeap::ref_processing_init() |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3283 // to see how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3284 // |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3285 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3286 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3287 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3288 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3289 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3290 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3291 // of the collection set!). |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3292 abandon_cur_alloc_region(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3293 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3294 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3295 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3296 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3297 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3298 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3299 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3300 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3301 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3302 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3303 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3304 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3305 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3306 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3307 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3308 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3309 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3310 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3311 #endif // YOUNG_LIST_VERBOSE |
342 | 3312 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3313 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3314 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3315 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3316 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3317 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3318 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3319 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3320 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3321 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3322 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3323 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3324 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3325 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3326 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3327 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3328 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3329 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3330 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3331 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3332 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3333 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3334 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3335 if (mark_in_progress()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3336 concurrent_mark()->newCSet(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3337 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3338 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3339 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3340 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3341 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3342 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3343 |
1707 | 3344 g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3345 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3346 // Nothing to do if we were unable to choose a collection set. |
342 | 3347 #if G1_REM_SET_LOGGING |
1707 | 3348 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
3349 print(); | |
342 | 3350 #endif |
1707 | 3351 PrepareForRSScanningClosure prepare_for_rs_scan; |
3352 collection_set_iterate(&prepare_for_rs_scan); | |
3353 | |
3354 setup_surviving_young_words(); | |
3355 | |
3356 // Set up the gc allocation regions. | |
3357 get_gc_alloc_regions(); | |
3358 | |
3359 // Actually do the work... | |
3360 evacuate_collection_set(); | |
3361 | |
3362 free_collection_set(g1_policy()->collection_set()); | |
3363 g1_policy()->clear_collection_set(); | |
3364 | |
3365 cleanup_surviving_young_words(); | |
3366 | |
3367 // Start a new incremental collection set for the next pause. | |
3368 g1_policy()->start_incremental_cset_building(); | |
3369 | |
3370 // Clear the _cset_fast_test bitmap in anticipation of adding | |
3371 // regions to the incremental collection set for the next | |
3372 // evacuation pause. | |
3373 clear_cset_fast_test(); | |
3374 | |
3375 if (g1_policy()->in_young_gc_mode()) { | |
3376 _young_list->reset_sampled_info(); | |
3377 | |
3378 // Don't check the whole heap at this point as the | |
3379 // GC alloc regions from this pause have been tagged | |
3380 // as survivors and moved on to the survivor list. | |
3381 // Survivor regions will fail the !is_young() check. | |
3382 assert(check_young_list_empty(false /* check_heap */), | |
3383 "young list should be empty"); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3384 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3385 #if YOUNG_LIST_VERBOSE |
1707 | 3386 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
3387 _young_list->print(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3388 #endif // YOUNG_LIST_VERBOSE |
342 | 3389 |
1707 | 3390 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3391 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3392 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3393 |
1707 | 3394 _young_list->reset_auxilary_lists(); |
342 | 3395 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3396 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3397 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3398 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3399 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3400 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3401 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3402 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3403 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3404 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3405 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3406 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3407 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3408 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3409 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3410 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3411 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3412 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3413 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3414 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3415 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3416 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3417 } |
342 | 3418 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3419 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3420 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3421 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3422 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3423 #endif // YOUNG_LIST_VERBOSE |
342 | 3424 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3425 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3426 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3427 g1_policy()->record_pause_time_ms(pause_time_ms); |
1707 | 3428 g1_policy()->record_collection_pause_end(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3429 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3430 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3431 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3432 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3433 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3434 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3435 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3436 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3437 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3438 Universe::verify(false); |
342 | 3439 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3440 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3441 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3442 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3443 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3444 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3445 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3446 size_t bytes_before = capacity(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3447 expand(expand_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3448 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3449 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3450 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3451 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3452 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3453 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3454 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3455 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3456 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3457 #endif |
342 | 3458 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3459 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3460 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3461 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3462 assert(verify_region_lists(), "Bad region lists."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3463 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3464 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3465 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3466 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3467 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3468 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3469 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3470 |
1709 | 3471 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
3472 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | |
3473 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3474 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3475 Universe::print_heap_after_gc(); |
342 | 3476 } |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3477 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3478 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3479 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3480 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3481 } |
1973 | 3482 |
3483 return true; | |
342 | 3484 } |
3485 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3486 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3487 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3488 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3489 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3490 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3491 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3492 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3493 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3494 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3495 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3496 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3497 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3498 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3499 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3500 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3501 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3502 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3503 |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3504 |
342 | 3505 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
3506 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 3507 // make sure we don't call set_gc_alloc_region() multiple times on |
3508 // the same region | |
3509 assert(r == NULL || !r->is_gc_alloc_region(), | |
3510 "shouldn't already be a GC alloc region"); | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3511 assert(r == NULL || !r->isHumongous(), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3512 "humongous regions shouldn't be used as GC alloc regions"); |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3513 |
342 | 3514 HeapWord* original_top = NULL; |
3515 if (r != NULL) | |
3516 original_top = r->top(); | |
3517 | |
3518 // We will want to record the used space in r as being there before gc. | |
3519 // One we install it as a GC alloc region it's eligible for allocation. | |
3520 // So record it now and use it later. | |
3521 size_t r_used = 0; | |
3522 if (r != NULL) { | |
3523 r_used = r->used(); | |
3524 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
3525 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 3526 // need to take the lock to guard against two threads calling |
3527 // get_gc_alloc_region concurrently (very unlikely but...) | |
3528 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3529 r->save_marks(); | |
3530 } | |
3531 } | |
3532 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
3533 _gc_alloc_regions[purpose] = r; | |
3534 if (old_alloc_region != NULL) { | |
3535 // Replace aliases too. | |
3536 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3537 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
3538 _gc_alloc_regions[ap] = r; | |
3539 } | |
3540 } | |
3541 } | |
3542 if (r != NULL) { | |
3543 push_gc_alloc_region(r); | |
3544 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
3545 // We are using a region as a GC alloc region after it has been used | |
3546 // as a mutator allocation region during the current marking cycle. | |
3547 // The mutator-allocated objects are currently implicitly marked, but | |
3548 // when we move hr->next_top_at_mark_start() forward at the the end | |
3549 // of the GC pause, they won't be. We therefore mark all objects in | |
3550 // the "gap". We do this object-by-object, since marking densely | |
3551 // does not currently work right with marking bitmap iteration. This | |
3552 // means we rely on TLAB filling at the start of pauses, and no | |
3553 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
3554 // to fix the marking bitmap iteration. | |
3555 HeapWord* curhw = r->next_top_at_mark_start(); | |
3556 HeapWord* t = original_top; | |
3557 | |
3558 while (curhw < t) { | |
3559 oop cur = (oop)curhw; | |
3560 // We'll assume parallel for generality. This is rare code. | |
3561 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
3562 curhw = curhw + cur->size(); | |
3563 } | |
3564 assert(curhw == t, "Should have parsed correctly."); | |
3565 } | |
3566 if (G1PolicyVerbose > 1) { | |
3567 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
3568 "for survivors:", r->bottom(), original_top, r->end()); | |
3569 r->print(); | |
3570 } | |
3571 g1_policy()->record_before_bytes(r_used); | |
3572 } | |
3573 } | |
3574 | |
3575 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
3576 assert(Thread::current()->is_VM_thread() || | |
3577 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); | |
3578 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), | |
3579 "Precondition."); | |
3580 hr->set_is_gc_alloc_region(true); | |
3581 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
3582 _gc_alloc_region_list = hr; | |
3583 } | |
3584 | |
3585 #ifdef G1_DEBUG | |
3586 class FindGCAllocRegion: public HeapRegionClosure { | |
3587 public: | |
3588 bool doHeapRegion(HeapRegion* r) { | |
3589 if (r->is_gc_alloc_region()) { | |
3590 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", | |
3591 r->hrs_index(), r->bottom()); | |
3592 } | |
3593 return false; | |
3594 } | |
3595 }; | |
3596 #endif // G1_DEBUG | |
3597 | |
3598 void G1CollectedHeap::forget_alloc_region_list() { | |
3599 assert(Thread::current()->is_VM_thread(), "Precondition"); | |
3600 while (_gc_alloc_region_list != NULL) { | |
3601 HeapRegion* r = _gc_alloc_region_list; | |
3602 assert(r->is_gc_alloc_region(), "Invariant."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3603 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3604 // newly allocated data in order to be able to apply deferred updates |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3605 // before the GC is done for verification purposes (i.e to allow |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3606 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3607 // collection. |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3608 r->ContiguousSpace::set_saved_mark(); |
342 | 3609 _gc_alloc_region_list = r->next_gc_alloc_region(); |
3610 r->set_next_gc_alloc_region(NULL); | |
3611 r->set_is_gc_alloc_region(false); | |
545 | 3612 if (r->is_survivor()) { |
3613 if (r->is_empty()) { | |
3614 r->set_not_young(); | |
3615 } else { | |
3616 _young_list->add_survivor_region(r); | |
3617 } | |
3618 } | |
342 | 3619 if (r->is_empty()) { |
3620 ++_free_regions; | |
3621 } | |
3622 } | |
3623 #ifdef G1_DEBUG | |
3624 FindGCAllocRegion fa; | |
3625 heap_region_iterate(&fa); | |
3626 #endif // G1_DEBUG | |
3627 } | |
3628 | |
3629 | |
// Sanity check on the GC alloc regions, used from asserts.
// Currently a stub that always succeeds.
bool G1CollectedHeap::check_gc_alloc_regions() {
  // TODO: allocation regions check
  return true;
}
3634 | |
3635 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 3636 // First, let's check that the GC alloc region list is empty (it should) |
3637 assert(_gc_alloc_region_list == NULL, "invariant"); | |
3638 | |
342 | 3639 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 3640 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3641 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
636 | 3642 |
342 | 3643 // Create new GC alloc regions. |
636 | 3644 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
3645 _retained_gc_alloc_regions[ap] = NULL; | |
3646 | |
3647 if (alloc_region != NULL) { | |
3648 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
3649 | |
3650 // let's make sure that the GC alloc region is not tagged as such | |
3651 // outside a GC operation | |
3652 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
3653 | |
3654 if (alloc_region->in_collection_set() || | |
3655 alloc_region->top() == alloc_region->end() || | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3656 alloc_region->top() == alloc_region->bottom() || |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3657 alloc_region->isHumongous()) { |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3658 // we will discard the current GC alloc region if |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3659 // * it's in the collection set (it can happen!), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3660 // * it's already full (no point in using it), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3661 // * it's empty (this means that it was emptied during |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3662 // a cleanup and it should be on the free list now), or |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3663 // * it's humongous (this means that it was emptied |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3664 // during a cleanup and was added to the free list, but |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3665 // has been subseqently used to allocate a humongous |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3666 // object that may be less than the region size). |
636 | 3667 |
3668 alloc_region = NULL; | |
3669 } | |
3670 } | |
3671 | |
3672 if (alloc_region == NULL) { | |
3673 // we will get a new GC alloc region | |
342 | 3674 alloc_region = newAllocRegionWithExpansion(ap, 0); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3675 } else { |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3676 // the region was retained from the last collection |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3677 ++_gc_alloc_region_counts[ap]; |
1388 | 3678 if (G1PrintHeapRegions) { |
3679 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
3680 "top "PTR_FORMAT, | |
3681 alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); | |
3682 } | |
342 | 3683 } |
636 | 3684 |
342 | 3685 if (alloc_region != NULL) { |
636 | 3686 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 3687 set_gc_alloc_region(ap, alloc_region); |
3688 } | |
636 | 3689 |
3690 assert(_gc_alloc_regions[ap] == NULL || | |
3691 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
3692 "the GC alloc region should be tagged as such"); | |
3693 assert(_gc_alloc_regions[ap] == NULL || | |
3694 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
3695 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 3696 } |
3697 // Set alternative regions for allocation purposes that have reached | |
636 | 3698 // their limit. |
342 | 3699 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
3700 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
3701 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
3702 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
3703 } | |
3704 } | |
3705 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3706 } | |
3707 | |
636 | 3708 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 3709 // We keep a separate list of all regions that have been alloc regions in |
636 | 3710 // the current collection pause. Forget that now. This method will |
3711 // untag the GC alloc regions and tear down the GC alloc region | |
3712 // list. It's desirable that no regions are tagged as GC alloc | |
3713 // outside GCs. | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3714 |
342 | 3715 forget_alloc_region_list(); |
3716 | |
3717 // The current alloc regions contain objs that have survived | |
3718 // collection. Make them no longer GC alloc regions. | |
3719 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3720 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 3721 _retained_gc_alloc_regions[ap] = NULL; |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3722 _gc_alloc_region_counts[ap] = 0; |
636 | 3723 |
3724 if (r != NULL) { | |
3725 // we retain nothing on _gc_alloc_regions between GCs | |
3726 set_gc_alloc_region(ap, NULL); | |
3727 | |
3728 if (r->is_empty()) { | |
3729 // we didn't actually allocate anything in it; let's just put | |
3730 // it on the free list | |
342 | 3731 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
3732 r->set_zero_fill_complete(); | |
3733 put_free_region_on_list_locked(r); | |
636 | 3734 } else if (_retain_gc_alloc_region[ap] && !totally) { |
3735 // retain it so that we can use it at the beginning of the next GC | |
3736 _retained_gc_alloc_regions[ap] = r; | |
342 | 3737 } |
3738 } | |
636 | 3739 } |
3740 } | |
3741 | |
3742 #ifndef PRODUCT | |
3743 // Useful for debugging | |
3744 | |
3745 void G1CollectedHeap::print_gc_alloc_regions() { | |
3746 gclog_or_tty->print_cr("GC alloc regions"); | |
3747 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3748 HeapRegion* r = _gc_alloc_regions[ap]; | |
3749 if (r == NULL) { | |
3750 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3751 } else { | |
3752 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3753 ap, r->bottom(), r->used()); | |
3754 } | |
3755 } | |
3756 } | |
3757 #endif // PRODUCT | |
342 | 3758 |
3759 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3760 _drain_in_progress = false; | |
3761 set_evac_failure_closure(cl); | |
3762 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3763 } | |
3764 | |
3765 void G1CollectedHeap::finalize_for_evac_failure() { | |
3766 assert(_evac_failure_scan_stack != NULL && | |
3767 _evac_failure_scan_stack->length() == 0, | |
3768 "Postcondition"); | |
3769 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3770 delete _evac_failure_scan_stack; |
342 | 3771 _evac_failure_scan_stack = NULL; |
3772 } | |
3773 | |
3774 | |
3775 | |
3776 // *** Sequential G1 Evacuation | |
3777 | |
3778 class G1IsAliveClosure: public BoolObjectClosure { | |
3779 G1CollectedHeap* _g1; | |
3780 public: | |
3781 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3782 void do_object(oop p) { assert(false, "Do not call."); } | |
3783 bool do_object_b(oop p) { | |
3784 // It is reachable if it is outside the collection set, or is inside | |
3785 // and forwarded. | |
3786 | |
3787 #ifdef G1_DEBUG | |
3788 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3789 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3790 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3791 #endif // G1_DEBUG | |
3792 | |
3793 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3794 } | |
3795 }; | |
3796 | |
3797 class G1KeepAliveClosure: public OopClosure { | |
3798 G1CollectedHeap* _g1; | |
3799 public: | |
3800 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3801 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3802 void do_oop( oop* p) { |
342 | 3803 oop obj = *p; |
3804 #ifdef G1_DEBUG | |
3805 if (PrintGC && Verbose) { | |
3806 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3807 p, (void*) obj, (void*) *p); | |
3808 } | |
3809 #endif // G1_DEBUG | |
3810 | |
3811 if (_g1->obj_in_cs(obj)) { | |
3812 assert( obj->is_forwarded(), "invariant" ); | |
3813 *p = obj->forwardee(); | |
3814 #ifdef G1_DEBUG | |
3815 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3816 (void*) obj, (void*) *p); | |
3817 #endif // G1_DEBUG | |
3818 } | |
3819 } | |
3820 }; | |
3821 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3822 class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3823 private: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3824 G1CollectedHeap* _g1; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3825 DirtyCardQueue *_dcq; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3826 CardTableModRefBS* _ct_bs; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3827 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3828 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3829 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3830 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3831 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3832 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3833 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3834 template <class T> void do_oop_work(T* p) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3835 assert(_from->is_in_reserved(p), "paranoia"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3836 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3837 !_from->is_survivor()) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3838 size_t card_index = _ct_bs->index_for(p); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3839 if (_ct_bs->mark_card_deferred(card_index)) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3840 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3841 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3842 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3843 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3844 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3845 |
342 | 3846 class RemoveSelfPointerClosure: public ObjectClosure { |
3847 private: | |
3848 G1CollectedHeap* _g1; | |
3849 ConcurrentMark* _cm; | |
3850 HeapRegion* _hr; | |
3851 size_t _prev_marked_bytes; | |
3852 size_t _next_marked_bytes; | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3853 OopsInHeapRegionClosure *_cl; |
342 | 3854 public: |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3855 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3856 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3857 _next_marked_bytes(0), _cl(cl) {} |
342 | 3858 |
3859 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3860 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3861 | |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3862 // The original idea here was to coalesce evacuated and dead objects. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3863 // However that caused complications with the block offset table (BOT). |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3864 // In particular if there were two TLABs, one of them partially refined. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3865 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3866 // The BOT entries of the unrefined part of TLAB_2 point to the start |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3867 // of TLAB_2. If the last object of the TLAB_1 and the first object |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3868 // of TLAB_2 are coalesced, then the cards of the unrefined part |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3869 // would point into middle of the filler object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3870 // |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3871 // The current approach is to not coalesce and leave the BOT contents intact. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3872 void do_object(oop obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3873 if (obj->is_forwarded() && obj->forwardee() == obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3874 // The object failed to move. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3875 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3876 _cm->markPrev(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3877 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3878 _prev_marked_bytes += (obj->size() * HeapWordSize); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3879 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3880 _cm->markAndGrayObjectIfNecessary(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3881 } |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3882 obj->set_mark(markOopDesc::prototype()); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3883 // While we were processing RSet buffers during the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3884 // collection, we actually didn't scan any cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3885 // collection set, since we didn't want to update remebered |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3886 // sets with entries that point into the collection set, given |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3887 // that live objects fromthe collection set are about to move |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3888 // and such entries will be stale very soon. This change also |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3889 // dealt with a reliability issue which involved scanning a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3890 // card in the collection set and coming across an array that |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3891 // was being chunked and looking malformed. The problem is |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3892 // that, if evacuation fails, we might have remembered set |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3893 // entries missing given that we skipped cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3894 // collection set. So, we'll recreate such entries now. |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3895 obj->oop_iterate(_cl); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3896 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3897 } else { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3898 // The object has been either evacuated or is dead. Fill it with a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3899 // dummy object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3900 MemRegion mr((HeapWord*)obj, obj->size()); |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3901 CollectedHeap::fill_with_object(mr); |
342 | 3902 _cm->clearRangeBothMaps(mr); |
3903 } | |
3904 } | |
3905 }; | |
3906 | |
3907 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
1705 | 3908 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3909 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3910 UpdateRSetDeferred deferred_update(_g1h, &dcq); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3911 OopsInHeapRegionClosure *cl; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3912 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3913 cl = &deferred_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3914 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3915 cl = &immediate_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3916 } |
342 | 3917 HeapRegion* cur = g1_policy()->collection_set(); |
3918 while (cur != NULL) { | |
3919 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3920 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3921 RemoveSelfPointerClosure rspc(_g1h, cl); |
342 | 3922 if (cur->evacuation_failed()) { |
3923 assert(cur->in_collection_set(), "bad CS"); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3924 cl->set_region(cur); |
342 | 3925 cur->object_iterate(&rspc); |
3926 | |
3927 // A number of manipulations to make the TAMS be the current top, | |
3928 // and the marked bytes be the ones observed in the iteration. | |
3929 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
3930 // The comments below are the postconditions achieved by the | |
3931 // calls. Note especially the last such condition, which says that | |
3932 // the count of marked bytes has been properly restored. | |
3933 cur->note_start_of_marking(false); | |
3934 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3935 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
3936 // _next_marked_bytes == prev_marked_bytes. | |
3937 cur->note_end_of_marking(); | |
3938 // _prev_top_at_mark_start == top(), | |
3939 // _prev_marked_bytes == prev_marked_bytes | |
3940 } | |
3941 // If there is no mark in progress, we modified the _next variables | |
3942 // above needlessly, but harmlessly. | |
3943 if (_g1h->mark_in_progress()) { | |
3944 cur->note_start_of_marking(false); | |
3945 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3946 // _next_marked_bytes == next_marked_bytes. | |
3947 } | |
3948 | |
3949 // Now make sure the region has the right index in the sorted array. | |
3950 g1_policy()->note_change_in_marked_bytes(cur); | |
3951 } | |
3952 cur = cur->next_in_collection_set(); | |
3953 } | |
3954 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3955 | |
3956 // Now restore saved marks, if any. | |
3957 if (_objs_with_preserved_marks != NULL) { | |
3958 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3959 assert(_objs_with_preserved_marks->length() == | |
3960 _preserved_marks_of_objs->length(), "Both or none."); | |
3961 guarantee(_objs_with_preserved_marks->length() == | |
3962 _preserved_marks_of_objs->length(), "Both or none."); | |
3963 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3964 oop obj = _objs_with_preserved_marks->at(i); | |
3965 markOop m = _preserved_marks_of_objs->at(i); | |
3966 obj->set_mark(m); | |
3967 } | |
3968 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3969 delete _objs_with_preserved_marks; | |
3970 delete _preserved_marks_of_objs; | |
3971 _objs_with_preserved_marks = NULL; | |
3972 _preserved_marks_of_objs = NULL; | |
3973 } | |
3974 } | |
3975 | |
// Record an evacuation-failed object so that its references can be
// rescanned later by drain_evac_failure_scan_stack().
void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  _evac_failure_scan_stack->push(obj);
}
3979 | |
3980 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3981 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3982 | |
3983 while (_evac_failure_scan_stack->length() > 0) { | |
3984 oop obj = _evac_failure_scan_stack->pop(); | |
3985 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3986 obj->oop_iterate_backwards(_evac_failure_closure); | |
3987 } | |
3988 } | |
3989 | |
3990 void G1CollectedHeap::handle_evacuation_failure(oop old) { | |
3991 markOop m = old->mark(); | |
3992 // forward to self | |
3993 assert(!old->is_forwarded(), "precondition"); | |
3994 | |
3995 old->forward_to(old); | |
3996 handle_evacuation_failure_common(old, m); | |
3997 } | |
3998 | |
3999 oop | |
4000 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
4001 oop old) { | |
4002 markOop m = old->mark(); | |
4003 oop forward_ptr = old->forward_to_atomic(old); | |
4004 if (forward_ptr == NULL) { | |
4005 // Forward-to-self succeeded. | |
4006 if (_evac_failure_closure != cl) { | |
4007 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
4008 assert(!_drain_in_progress, | |
4009 "Should only be true while someone holds the lock."); | |
4010 // Set the global evac-failure closure to the current thread's. | |
4011 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
4012 set_evac_failure_closure(cl); | |
4013 // Now do the common part. | |
4014 handle_evacuation_failure_common(old, m); | |
4015 // Reset to NULL. | |
4016 set_evac_failure_closure(NULL); | |
4017 } else { | |
4018 // The lock is already held, and this is recursive. | |
4019 assert(_drain_in_progress, "This should only be the recursive case."); | |
4020 handle_evacuation_failure_common(old, m); | |
4021 } | |
4022 return old; | |
4023 } else { | |
4024 // Someone else had a place to copy it. | |
4025 return forward_ptr; | |
4026 } | |
4027 } | |
4028 | |
4029 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
4030 set_evacuation_failed(true); | |
4031 | |
4032 preserve_mark_if_necessary(old, m); | |
4033 | |
4034 HeapRegion* r = heap_region_containing(old); | |
4035 if (!r->evacuation_failed()) { | |
4036 r->set_evacuation_failed(true); | |
1282 | 4037 if (G1PrintHeapRegions) { |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
4038 gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " |
342 | 4039 "["PTR_FORMAT","PTR_FORMAT")\n", |
4040 r, r->bottom(), r->end()); | |
4041 } | |
4042 } | |
4043 | |
4044 push_on_evac_failure_scan_stack(old); | |
4045 | |
4046 if (!_drain_in_progress) { | |
4047 // prevent recursion in copy_to_survivor_space() | |
4048 _drain_in_progress = true; | |
4049 drain_evac_failure_scan_stack(); | |
4050 _drain_in_progress = false; | |
4051 } | |
4052 } | |
4053 | |
4054 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
4055 if (m != markOopDesc::prototype()) { | |
4056 if (_objs_with_preserved_marks == NULL) { | |
4057 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
4058 _objs_with_preserved_marks = | |
4059 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
4060 _preserved_marks_of_objs = | |
4061 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
4062 } | |
4063 _objs_with_preserved_marks->push(obj); | |
4064 _preserved_marks_of_objs->push(m); | |
4065 } | |
4066 } | |
4067 | |
4068 // *** Parallel G1 Evacuation | |
4069 | |
4070 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
4071 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4072 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4073 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4074 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4075 |
342 | 4076 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
4077 // let the caller handle alloc failure | |
4078 if (alloc_region == NULL) return NULL; | |
4079 | |
4080 HeapWord* block = alloc_region->par_allocate(word_size); | |
4081 if (block == NULL) { | |
4082 MutexLockerEx x(par_alloc_during_gc_lock(), | |
4083 Mutex::_no_safepoint_check_flag); | |
4084 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
4085 } | |
4086 return block; | |
4087 } | |
4088 | |
545 | 4089 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
4090 bool par) { | |
4091 // Another thread might have obtained alloc_region for the given | |
4092 // purpose, and might be attempting to allocate in it, and might | |
4093 // succeed. Therefore, we can't do the "finalization" stuff on the | |
4094 // region below until we're sure the last allocation has happened. | |
4095 // We ensure this by allocating the remaining space with a garbage | |
4096 // object. | |
4097 if (par) par_allocate_remaining_space(alloc_region); | |
4098 // Now we can do the post-GC stuff on the region. | |
4099 alloc_region->note_end_of_copying(); | |
4100 g1_policy()->record_after_bytes(alloc_region->used()); | |
4101 } | |
4102 | |
342 | 4103 HeapWord* |
4104 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
4105 HeapRegion* alloc_region, | |
4106 bool par, | |
4107 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4108 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4109 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4110 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4111 |
342 | 4112 HeapWord* block = NULL; |
4113 // In the parallel case, a previous thread to obtain the lock may have | |
4114 // already assigned a new gc_alloc_region. | |
4115 if (alloc_region != _gc_alloc_regions[purpose]) { | |
4116 assert(par, "But should only happen in parallel case."); | |
4117 alloc_region = _gc_alloc_regions[purpose]; | |
4118 if (alloc_region == NULL) return NULL; | |
4119 block = alloc_region->par_allocate(word_size); | |
4120 if (block != NULL) return block; | |
4121 // Otherwise, continue; this new region is empty, too. | |
4122 } | |
4123 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 4124 retire_alloc_region(alloc_region, par); |
342 | 4125 |
4126 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
4127 // Cannot allocate more regions for the given purpose. | |
4128 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
4129 // Is there an alternative? | |
4130 if (purpose != alt_purpose) { | |
4131 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
4132 // Has not the alternative region been aliased? | |
545 | 4133 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 4134 // Try to allocate in the alternative region. |
4135 if (par) { | |
4136 block = alt_region->par_allocate(word_size); | |
4137 } else { | |
4138 block = alt_region->allocate(word_size); | |
4139 } | |
4140 // Make an alias. | |
4141 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 4142 if (block != NULL) { |
4143 return block; | |
4144 } | |
4145 retire_alloc_region(alt_region, par); | |
342 | 4146 } |
4147 // Both the allocation region and the alternative one are full | |
4148 // and aliased, replace them with a new allocation region. | |
4149 purpose = alt_purpose; | |
4150 } else { | |
4151 set_gc_alloc_region(purpose, NULL); | |
4152 return NULL; | |
4153 } | |
4154 } | |
4155 | |
4156 // Now allocate a new region for allocation. | |
4157 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); | |
4158 | |
4159 // let the caller handle alloc failure | |
4160 if (alloc_region != NULL) { | |
4161 | |
4162 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
4163 assert(alloc_region->saved_mark_at_top(), | |
4164 "Mark should have been saved already."); | |
4165 // We used to assert that the region was zero-filled here, but no | |
4166 // longer. | |
4167 | |
4168 // This must be done last: once it's installed, other regions may | |
4169 // allocate in it (without holding the lock.) | |
4170 set_gc_alloc_region(purpose, alloc_region); | |
4171 | |
4172 if (par) { | |
4173 block = alloc_region->par_allocate(word_size); | |
4174 } else { | |
4175 block = alloc_region->allocate(word_size); | |
4176 } | |
4177 // Caller handles alloc failure. | |
4178 } else { | |
4179 // This sets other apis using the same old alloc region to NULL, also. | |
4180 set_gc_alloc_region(purpose, NULL); | |
4181 } | |
4182 return block; // May be NULL. | |
4183 } | |
4184 | |
4185 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
4186 HeapWord* block = NULL; | |
4187 size_t free_words; | |
4188 do { | |
4189 free_words = r->free()/HeapWordSize; | |
4190 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
4191 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 4192 // Otherwise, try to claim it. |
4193 block = r->par_allocate(free_words); | |
4194 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
4195 fill_with_object(block, free_words); |
342 | 4196 } |
4197 | |
4198 #ifndef PRODUCT | |
4199 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
4200 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
4201 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
4202 return true; | |
4203 } | |
4204 #endif // PRODUCT | |
4205 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4206 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4207 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4208 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4209 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4210 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4211 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4212 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4213 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4214 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4215 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4216 _age_table(false), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4217 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4218 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4219 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4220 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4221 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4222 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4223 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4224 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4225 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4226 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4227 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4228 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4229 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4230 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4231 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4232 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4233 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4234 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4235 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4236 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4237 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4238 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4239 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4240 } |
342 | 4241 |
1709 | 4242 void |
4243 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) | |
4244 { | |
4245 st->print_raw_cr("GC Termination Stats"); | |
4246 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" | |
4247 " ------waste (KiB)------"); | |
4248 st->print_raw_cr("thr ms ms % ms % attempts" | |
4249 " total alloc undo"); | |
4250 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" | |
4251 " ------- ------- -------"); | |
4252 } | |
4253 | |
4254 void | |
4255 G1ParScanThreadState::print_termination_stats(int i, | |
4256 outputStream* const st) const | |
4257 { | |
4258 const double elapsed_ms = elapsed_time() * 1000.0; | |
4259 const double s_roots_ms = strong_roots_time() * 1000.0; | |
4260 const double term_ms = term_time() * 1000.0; | |
4261 st->print_cr("%3d %9.2f %9.2f %6.2f " | |
4262 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " | |
4263 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), | |
4264 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, | |
4265 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), | |
4266 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, | |
4267 alloc_buffer_waste() * HeapWordSize / K, | |
4268 undo_waste() * HeapWordSize / K); | |
4269 } | |
4270 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
#ifdef ASSERT
// Debug-only sanity checks on task-queue entries.

bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  // Narrow refs are never partial-array tasks.
  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
  oop p = oopDesc::load_decode_heap_oop(ref);
  assert(_g1h->is_in_g1_reserved(p),
         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  return true;
}

bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->obj_in_cs(p),
           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  } else {
    oop p = oopDesc::load_decode_heap_oop(ref);
    assert(_g1h->is_in_g1_reserved(p),
           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  }
  return true;
}

bool G1ParScanThreadState::verify_task(StarTask ref) const {
  // Dispatch on the task's encoded width.
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  }
  return verify_ref((oop*) ref);
}
#endif // ASSERT
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4305 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4306 void G1ParScanThreadState::trim_queue() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4307 StarTask ref; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4308 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4309 // Drain the overflow stack first, so other threads can steal. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4310 while (refs()->pop_overflow(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4311 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4312 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4313 while (refs()->pop_local(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4314 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4315 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4316 } while (!refs()->is_empty()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4317 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4318 |
342 | 4319 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
4320 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
4321 _par_scan_state(par_scan_state) { } | |
4322 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4323 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 4324 // This is called _after_ do_oop_work has been called, hence after |
4325 // the object has been relocated to its new location and *p points | |
4326 // to its new location. | |
4327 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4328 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4329 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4330 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4331 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), |
342 | 4332 "shouldn't still be in the CSet if evacuation didn't fail."); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4333 HeapWord* addr = (HeapWord*)obj; |
342 | 4334 if (_g1->is_in_g1_reserved(addr)) |
4335 _cm->grayRoot(oop(addr)); | |
4336 } | |
4337 } | |
4338 | |
4339 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
4340 size_t word_sz = old->size(); | |
4341 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
4342 // +1 to make the -1 indexes valid... | |
4343 int young_index = from_region->young_index_in_cset()+1; | |
4344 assert( (from_region->is_young() && young_index > 0) || | |
4345 (!from_region->is_young() && young_index == 0), "invariant" ); | |
4346 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
4347 markOop m = old->mark(); | |
545 | 4348 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
4349 : m->age(); | |
4350 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 4351 word_sz); |
4352 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
4353 oop obj = oop(obj_ptr); | |
4354 | |
4355 if (obj_ptr == NULL) { | |
4356 // This will either forward-to-self, or detect that someone else has | |
4357 // installed a forwarding pointer. | |
4358 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4359 return _g1->handle_evacuation_failure_par(cl, old); | |
4360 } | |
4361 | |
526 | 4362 // We're going to allocate linearly, so might as well prefetch ahead. |
4363 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
4364 | |
342 | 4365 oop forward_ptr = old->forward_to_atomic(obj); |
4366 if (forward_ptr == NULL) { | |
4367 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 4368 if (g1p->track_object_age(alloc_purpose)) { |
4369 // We could simply do obj->incr_age(). However, this causes a | |
4370 // performance issue. obj->incr_age() will first check whether | |
4371 // the object has a displaced mark by checking its mark word; | |
4372 // getting the mark word from the new location of the object | |
4373 // stalls. So, given that we already have the mark word and we | |
4374 // are about to install it anyway, it's better to increase the | |
4375 // age on the mark word, when the object does not have a | |
4376 // displaced mark word. We're not expecting many objects to have | |
4377 // a displaced marked word, so that case is not optimized | |
4378 // further (it could be...) and we simply call obj->incr_age(). | |
4379 | |
4380 if (m->has_displaced_mark_helper()) { | |
4381 // in this case, we have to install the mark word first, | |
4382 // otherwise obj looks to be forwarded (the old mark word, | |
4383 // which contains the forward pointer, was copied) | |
4384 obj->set_mark(m); | |
4385 obj->incr_age(); | |
4386 } else { | |
4387 m = m->incr_age(); | |
545 | 4388 obj->set_mark(m); |
526 | 4389 } |
545 | 4390 _par_scan_state->age_table()->add(obj, word_sz); |
4391 } else { | |
4392 obj->set_mark(m); | |
526 | 4393 } |
4394 | |
342 | 4395 // preserve "next" mark bit |
4396 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
4397 if (!use_local_bitmaps || | |
4398 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
4399 // if we couldn't mark it on the local bitmap (this happens when | |
4400 // the object was not allocated in the GCLab), we have to bite | |
4401 // the bullet and do the standard parallel mark | |
4402 _cm->markAndGrayObjectIfNecessary(obj); | |
4403 } | |
4404 #if 1 | |
4405 if (_g1->isMarkedNext(old)) { | |
4406 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
4407 } | |
4408 #endif | |
4409 } | |
4410 | |
4411 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
4412 surv_young_words[young_index] += word_sz; | |
4413 | |
4414 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
4415 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4416 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4417 _par_scan_state->push_on_queue(old_p); |
342 | 4418 } else { |
526 | 4419 // No point in using the slower heap_region_containing() method, |
4420 // given that we know obj is in the heap. | |
4421 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 4422 obj->oop_iterate_backwards(_scanner); |
4423 } | |
4424 } else { | |
4425 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
4426 obj = forward_ptr; | |
4427 } | |
4428 return obj; | |
4429 } | |
4430 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4431 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4432 template <class T> |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4433 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4434 ::do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4435 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 4436 assert(barrier != G1BarrierRS || obj != NULL, |
4437 "Precondition: G1BarrierRS implies obj is nonNull"); | |
4438 | |
526 | 4439 // here the null check is implicit in the cset_fast_test() test |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4440 if (_g1->in_cset_fast_test(obj)) { |
342 | 4441 #if G1_REM_SET_LOGGING |
526 | 4442 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
4443 "into CS.", p, (void*) obj); | |
342 | 4444 #endif |
526 | 4445 if (obj->is_forwarded()) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4446 oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
526 | 4447 } else { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4448 oop copy_oop = copy_to_survivor_space(obj); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4449 oopDesc::encode_store_heap_oop(p, copy_oop); |
342 | 4450 } |
526 | 4451 // When scanning the RS, we only care about objs in CS. |
4452 if (barrier == G1BarrierRS) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4453 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 4454 } |
526 | 4455 } |
4456 | |
4457 if (barrier == G1BarrierEvac && obj != NULL) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4458 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 4459 } |
4460 | |
4461 if (do_gen_barrier && obj != NULL) { | |
4462 par_do_barrier(p); | |
4463 } | |
4464 } | |
4465 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4466 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4467 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4468 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4469 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
526 | 4470 assert(has_partial_array_mask(p), "invariant"); |
4471 oop old = clear_partial_array_mask(p); | |
342 | 4472 assert(old->is_objArray(), "must be obj array"); |
4473 assert(old->is_forwarded(), "must be forwarded"); | |
4474 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
4475 | |
4476 objArrayOop obj = objArrayOop(old->forwardee()); | |
4477 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
4478 // Process ParGCArrayScanChunk elements now | |
4479 // and push the remainder back onto queue | |
4480 int start = arrayOop(old)->length(); | |
4481 int end = obj->length(); | |
4482 int remainder = end - start; | |
4483 assert(start <= end, "just checking"); | |
4484 if (remainder > 2 * ParGCArrayScanChunk) { | |
4485 // Test above combines last partial chunk with a full chunk | |
4486 end = start + ParGCArrayScanChunk; | |
4487 arrayOop(old)->set_length(end); | |
4488 // Push remainder. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4489 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4490 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4491 _par_scan_state->push_on_queue(old_p); |
342 | 4492 } else { |
4493 // Restore length so that the heap remains parsable in | |
4494 // case of evacuation failure. | |
4495 arrayOop(old)->set_length(end); | |
4496 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4497 _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
342 | 4498 // process our set of indices (include header in first chunk) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4499 obj->oop_iterate_range(&_scanner, start, end); |
342 | 4500 } |
4501 | |
4502 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
4503 protected: | |
4504 G1CollectedHeap* _g1h; | |
4505 G1ParScanThreadState* _par_scan_state; | |
4506 RefToScanQueueSet* _queues; | |
4507 ParallelTaskTerminator* _terminator; | |
4508 | |
4509 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
4510 RefToScanQueueSet* queues() { return _queues; } | |
4511 ParallelTaskTerminator* terminator() { return _terminator; } | |
4512 | |
4513 public: | |
4514 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
4515 G1ParScanThreadState* par_scan_state, | |
4516 RefToScanQueueSet* queues, | |
4517 ParallelTaskTerminator* terminator) | |
4518 : _g1h(g1h), _par_scan_state(par_scan_state), | |
4519 _queues(queues), _terminator(terminator) {} | |
4520 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4521 void do_void(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4522 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4523 private: |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4524 inline bool offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4525 }; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4526 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4527 bool G1ParEvacuateFollowersClosure::offer_termination() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4528 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4529 pss->start_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4530 const bool res = terminator()->offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4531 pss->end_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4532 return res; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4533 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4534 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4535 void G1ParEvacuateFollowersClosure::do_void() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4536 StarTask stolen_task; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4537 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4538 pss->trim_queue(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4539 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4540 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4541 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4542 assert(pss->verify_task(stolen_task), "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4543 if (stolen_task.is_narrow()) { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4544 pss->deal_with_reference((narrowOop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4545 } else { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4546 pss->deal_with_reference((oop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4547 } |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4548 |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4549 // We've just processed a reference and we might have made |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4550 // available new entries on the queues. So we have to make sure |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4551 // we drain the queues as necessary. |
342 | 4552 pss->trim_queue(); |
4553 } | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4554 } while (!offer_termination()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4555 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4556 pss->retire_alloc_buffers(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4557 } |
342 | 4558 |
4559 class G1ParTask : public AbstractGangTask { | |
4560 protected: | |
4561 G1CollectedHeap* _g1h; | |
4562 RefToScanQueueSet *_queues; | |
4563 ParallelTaskTerminator _terminator; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4564 int _n_workers; |
342 | 4565 |
4566 Mutex _stats_lock; | |
4567 Mutex* stats_lock() { return &_stats_lock; } | |
4568 | |
4569 size_t getNCards() { | |
4570 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
4571 / G1BlockOffsetSharedArray::N_bytes; | |
4572 } | |
4573 | |
4574 public: | |
4575 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
4576 : AbstractGangTask("G1 collection"), | |
4577 _g1h(g1h), | |
4578 _queues(task_queues), | |
4579 _terminator(workers, _queues), | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4580 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4581 _n_workers(workers) |
342 | 4582 {} |
4583 | |
4584 RefToScanQueueSet* queues() { return _queues; } | |
4585 | |
4586 RefToScanQueue *work_queue(int i) { | |
4587 return queues()->queue(i); | |
4588 } | |
4589 | |
4590 void work(int i) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4591 if (i >= _n_workers) return; // no work needed this round |
1611 | 4592 |
4593 double start_time_ms = os::elapsedTime() * 1000.0; | |
4594 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); | |
4595 | |
342 | 4596 ResourceMark rm; |
4597 HandleMark hm; | |
4598 | |
526 | 4599 G1ParScanThreadState pss(_g1h, i); |
4600 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
4601 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
4602 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 4603 |
4604 pss.set_evac_closure(&scan_evac_cl); | |
4605 pss.set_evac_failure_closure(&evac_failure_cl); | |
4606 pss.set_partial_scan_closure(&partial_scan_cl); | |
4607 | |
4608 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
4609 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
4610 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4611 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4612 |
342 | 4613 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
4614 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
4615 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
4616 | |
4617 OopsInHeapRegionClosure *scan_root_cl; | |
4618 OopsInHeapRegionClosure *scan_perm_cl; | |
4619 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
4620 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
342 | 4621 scan_root_cl = &scan_mark_root_cl; |
4622 scan_perm_cl = &scan_mark_perm_cl; | |
4623 } else { | |
4624 scan_root_cl = &only_scan_root_cl; | |
4625 scan_perm_cl = &only_scan_perm_cl; | |
4626 } | |
4627 | |
4628 pss.start_strong_roots(); | |
4629 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4630 SharedHeap::SO_AllClasses, | |
4631 scan_root_cl, | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4632 &push_heap_rs_cl, |
342 | 4633 scan_perm_cl, |
4634 i); | |
4635 pss.end_strong_roots(); | |
4636 { | |
4637 double start = os::elapsedTime(); | |
4638 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4639 evac.do_void(); | |
4640 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4641 double term_ms = pss.term_time()*1000.0; | |
4642 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
1611 | 4643 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); |
342 | 4644 } |
1282 | 4645 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
342 | 4646 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
4647 | |
4648 // Clean up any par-expanded rem sets. | |
4649 HeapRegionRemSet::par_cleanup(); | |
4650 | |
4651 if (ParallelGCVerbose) { | |
1709 | 4652 MutexLocker x(stats_lock()); |
4653 pss.print_termination_stats(i); | |
342 | 4654 } |
4655 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4656 assert(pss.refs()->is_empty(), "should be empty"); |
1611 | 4657 double end_time_ms = os::elapsedTime() * 1000.0; |
4658 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); | |
342 | 4659 } |
4660 }; | |
4661 | |
4662 // *** Common G1 Evacuation Stuff | |
4663 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4664 // This method is run in a GC worker. |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4665 |
342 | 4666 void |
4667 G1CollectedHeap:: | |
4668 g1_process_strong_roots(bool collecting_perm_gen, | |
4669 SharedHeap::ScanningOption so, | |
4670 OopClosure* scan_non_heap_roots, | |
4671 OopsInHeapRegionClosure* scan_rs, | |
4672 OopsInGenClosure* scan_perm, | |
4673 int worker_i) { | |
4674 // First scan the strong roots, including the perm gen. | |
4675 double ext_roots_start = os::elapsedTime(); | |
4676 double closure_app_time_sec = 0.0; | |
4677 | |
4678 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4679 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4680 buf_scan_perm.set_generation(perm_gen()); | |
4681 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4682 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4683 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4684 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4685 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4686 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4687 collecting_perm_gen, so, |
342 | 4688 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4689 &eager_scan_code_roots, |
342 | 4690 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4691 |
342 | 4692 // Finish up any enqueued closure apps. |
4693 buf_scan_non_heap_roots.done(); | |
4694 buf_scan_perm.done(); | |
4695 double ext_roots_end = os::elapsedTime(); | |
4696 g1_policy()->reset_obj_copy_time(worker_i); | |
4697 double obj_copy_time_sec = | |
4698 buf_scan_non_heap_roots.closure_app_seconds() + | |
4699 buf_scan_perm.closure_app_seconds(); | |
4700 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4701 double ext_root_time_ms = | |
4702 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4703 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4704 | |
4705 // Scan strong roots in mark stack. | |
4706 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4707 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4708 } | |
4709 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4710 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4711 | |
4712 // XXX What should this be doing in the parallel case? | |
4713 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4714 // Now scan the complement of the collection set. | |
4715 if (scan_rs != NULL) { | |
4716 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4717 } | |
4718 // Finish with the ref_processor roots. | |
4719 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4720 // We need to treat the discovered reference lists as roots and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4721 // keep entries (which are added by the marking threads) on them |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4722 // live until they can be processed at the end of marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4723 ref_processor()->weak_oops_do(scan_non_heap_roots); |
342 | 4724 ref_processor()->oops_do(scan_non_heap_roots); |
4725 } | |
4726 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4727 _process_strong_tasks->all_tasks_completed(); | |
4728 } | |
4729 | |
4730 void | |
4731 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4732 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4733 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4734 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4735 } |
4736 | |
4737 | |
4738 class SaveMarksClosure: public HeapRegionClosure { | |
4739 public: | |
4740 bool doHeapRegion(HeapRegion* r) { | |
4741 r->save_marks(); | |
4742 return false; | |
4743 } | |
4744 }; | |
4745 | |
4746 void G1CollectedHeap::save_marks() { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4747 if (!CollectedHeap::use_parallel_gc_threads()) { |
342 | 4748 SaveMarksClosure sm; |
4749 heap_region_iterate(&sm); | |
4750 } | |
4751 // We do this even in the parallel case | |
4752 perm_gen()->save_marks(); | |
4753 } | |
4754 | |
4755 void G1CollectedHeap::evacuate_collection_set() { | |
4756 set_evacuation_failed(false); | |
4757 | |
4758 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4759 concurrent_g1_refine()->set_use_cache(false); | |
889 | 4760 concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
4761 | |
342 | 4762 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
4763 set_par_threads(n_workers); | |
4764 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4765 | |
4766 init_for_evac_failure(NULL); | |
4767 | |
4768 rem_set()->prepare_for_younger_refs_iterate(true); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4769 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4770 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
342 | 4771 double start_par = os::elapsedTime(); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4772 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 4773 // The individual threads will set their evac-failure closures. |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4774 StrongRootsScope srs(this); |
1709 | 4775 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); |
342 | 4776 workers()->run_task(&g1_par_task); |
4777 } else { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4778 StrongRootsScope srs(this); |
342 | 4779 g1_par_task.work(0); |
4780 } | |
4781 | |
4782 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4783 g1_policy()->record_par_time(par_time); | |
4784 set_par_threads(0); | |
4785 // Is this the right thing to do here? We don't save marks | |
4786 // on individual heap regions when we allocate from | |
4787 // them in parallel, so this seems like the correct place for this. | |
545 | 4788 retire_all_alloc_regions(); |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4789 |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4790 // Weak root processing. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4791 // Note: when JSR 292 is enabled and code blobs can contain |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4792 // non-perm oops then we will need to process the code blobs |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4793 // here too. |
342 | 4794 { |
4795 G1IsAliveClosure is_alive(this); | |
4796 G1KeepAliveClosure keep_alive(this); | |
4797 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4798 } | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4799 release_gc_alloc_regions(false /* totally */); |
342 | 4800 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4801 |
889 | 4802 concurrent_g1_refine()->clear_hot_cache(); |
342 | 4803 concurrent_g1_refine()->set_use_cache(true); |
4804 | |
4805 finalize_for_evac_failure(); | |
4806 | |
4807 // Must do this before removing self-forwarding pointers, which clears | |
4808 // the per-region evac-failure flags. | |
4809 concurrent_mark()->complete_marking_in_collection_set(); | |
4810 | |
4811 if (evacuation_failed()) { | |
4812 remove_self_forwarding_pointers(); | |
4813 if (PrintGCDetails) { | |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
4814 gclog_or_tty->print(" (to-space overflow)"); |
342 | 4815 } else if (PrintGC) { |
4816 gclog_or_tty->print("--"); | |
4817 } | |
4818 } | |
4819 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4820 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4821 RedirtyLoggedCardTableEntryFastClosure redirty; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4822 dirty_card_queue_set().set_closure(&redirty); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4823 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
1111 | 4824 |
4825 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); | |
4826 dcq.merge_bufferlists(&dirty_card_queue_set()); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4827 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4828 } |
342 | 4829 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
4830 } | |
4831 | |
4832 void G1CollectedHeap::free_region(HeapRegion* hr) { | |
4833 size_t pre_used = 0; | |
4834 size_t cleared_h_regions = 0; | |
4835 size_t freed_regions = 0; | |
4836 UncleanRegionList local_list; | |
4837 | |
4838 HeapWord* start = hr->bottom(); | |
4839 HeapWord* end = hr->prev_top_at_mark_start(); | |
4840 size_t used_bytes = hr->used(); | |
4841 size_t live_bytes = hr->max_live_bytes(); | |
4842 if (used_bytes > 0) { | |
4843 guarantee( live_bytes <= used_bytes, "invariant" ); | |
4844 } else { | |
4845 guarantee( live_bytes == 0, "invariant" ); | |
4846 } | |
4847 | |
4848 size_t garbage_bytes = used_bytes - live_bytes; | |
4849 if (garbage_bytes > 0) | |
4850 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); | |
4851 | |
4852 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, | |
4853 &local_list); | |
4854 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
4855 &local_list); | |
4856 } | |
4857 | |
4858 void | |
4859 G1CollectedHeap::free_region_work(HeapRegion* hr, | |
4860 size_t& pre_used, | |
4861 size_t& cleared_h_regions, | |
4862 size_t& freed_regions, | |
4863 UncleanRegionList* list, | |
4864 bool par) { | |
4865 pre_used += hr->used(); | |
4866 if (hr->isHumongous()) { | |
4867 assert(hr->startsHumongous(), | |
4868 "Only the start of a humongous region should be freed."); | |
4869 int ind = _hrs->find(hr); | |
4870 assert(ind != -1, "Should have an index."); | |
4871 // Clear the start region. | |
4872 hr->hr_clear(par, true /*clear_space*/); | |
4873 list->insert_before_head(hr); | |
4874 cleared_h_regions++; | |
4875 freed_regions++; | |
4876 // Clear any continued regions. | |
4877 ind++; | |
4878 while ((size_t)ind < n_regions()) { | |
4879 HeapRegion* hrc = _hrs->at(ind); | |
4880 if (!hrc->continuesHumongous()) break; | |
4881 // Otherwise, does continue the H region. | |
4882 assert(hrc->humongous_start_region() == hr, "Huh?"); | |
4883 hrc->hr_clear(par, true /*clear_space*/); | |
4884 cleared_h_regions++; | |
4885 freed_regions++; | |
4886 list->insert_before_head(hrc); | |
4887 ind++; | |
4888 } | |
4889 } else { | |
4890 hr->hr_clear(par, true /*clear_space*/); | |
4891 list->insert_before_head(hr); | |
4892 freed_regions++; | |
4893 // If we're using clear2, this should not be enabled. | |
4894 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); | |
4895 } | |
4896 } | |
4897 | |
4898 void G1CollectedHeap::finish_free_region_work(size_t pre_used, | |
4899 size_t cleared_h_regions, | |
4900 size_t freed_regions, | |
4901 UncleanRegionList* list) { | |
4902 if (list != NULL && list->sz() > 0) { | |
4903 prepend_region_list_on_unclean_list(list); | |
4904 } | |
4905 // Acquire a lock, if we're parallel, to update possibly-shared | |
4906 // variables. | |
4907 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; | |
4908 { | |
4909 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
4910 _summary_bytes_used -= pre_used; | |
4911 _num_humongous_regions -= (int) cleared_h_regions; | |
4912 _free_regions += freed_regions; | |
4913 } | |
4914 } | |
4915 | |
4916 | |
4917 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4918 while (list != NULL) { | |
4919 guarantee( list->is_young(), "invariant" ); | |
4920 | |
4921 HeapWord* bottom = list->bottom(); | |
4922 HeapWord* end = list->end(); | |
4923 MemRegion mr(bottom, end); | |
4924 ct_bs->dirty(mr); | |
4925 | |
4926 list = list->get_next_young_region(); | |
4927 } | |
4928 } | |
4929 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4930 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4931 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4932 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4933 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4934 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4935 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4936 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4937 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4938 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4939 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4940 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4941 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4942 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4943 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4944 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4945 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4946 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4947 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4948 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4949 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4950 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4951 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4952 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4953 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4954 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4955 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4956 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4957 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4958 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4959 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4960 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4961 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4962 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4963 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4964 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4965 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4966 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4967 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4968 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4969 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4970 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4971 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4972 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4973 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4974 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4975 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4976 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4977 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4978 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4979 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4980 class G1VerifyCardTableCleanup: public HeapRegionClosure { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4981 CardTableModRefBS* _ct_bs; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4982 public: |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4983 G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4984 : _ct_bs(ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4985 { } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4986 virtual bool doHeapRegion(HeapRegion* r) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4987 { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4988 MemRegion mr(r->bottom(), r->end()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4989 if (r->is_survivor()) { |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4990 _ct_bs->verify_dirty_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4991 } else { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4992 _ct_bs->verify_clean_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4993 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4994 return false; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4995 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4996 }; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4997 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4998 |
342 | 4999 void G1CollectedHeap::cleanUpCardTable() { |
5000 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
5001 double start = os::elapsedTime(); | |
5002 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5003 // Iterate over the dirty cards region list. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5004 G1ParCleanupCTTask cleanup_task(ct_bs, this, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5005 _young_list->first_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5006 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5007 if (ParallelGCThreads > 0) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5008 set_par_threads(workers()->total_workers()); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5009 workers()->run_task(&cleanup_task); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5010 set_par_threads(0); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5011 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5012 while (_dirty_cards_region_list) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5013 HeapRegion* r = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5014 cleanup_task.clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5015 _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5016 if (_dirty_cards_region_list == r) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5017 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5018 _dirty_cards_region_list = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5019 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5020 r->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5021 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5022 // now, redirty the cards of the survivor regions |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5023 // (it seemed faster to do it this way, instead of iterating over |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5024 // all regions and then clearing / dirtying as appropriate) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5025 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5026 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5027 |
342 | 5028 double elapsed = os::elapsedTime() - start; |
5029 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5030 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5031 if (G1VerifyCTCleanup || VerifyAfterGC) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5032 G1VerifyCardTableCleanup cleanup_verifier(ct_bs); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5033 heap_region_iterate(&cleanup_verifier); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5034 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5035 #endif |
342 | 5036 } |
5037 | |
5038 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
5039 double young_time_ms = 0.0; | |
5040 double non_young_time_ms = 0.0; | |
5041 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5042 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5043 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5044 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5045 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5046 |
342 | 5047 G1CollectorPolicy* policy = g1_policy(); |
5048 | |
5049 double start_sec = os::elapsedTime(); | |
5050 bool non_young = true; | |
5051 | |
5052 HeapRegion* cur = cs_head; | |
5053 int age_bound = -1; | |
5054 size_t rs_lengths = 0; | |
5055 | |
5056 while (cur != NULL) { | |
5057 if (non_young) { | |
5058 if (cur->is_young()) { | |
5059 double end_sec = os::elapsedTime(); | |
5060 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5061 non_young_time_ms += elapsed_ms; | |
5062 | |
5063 start_sec = os::elapsedTime(); | |
5064 non_young = false; | |
5065 } | |
5066 } else { | |
5067 if (!cur->is_on_free_list()) { | |
5068 double end_sec = os::elapsedTime(); | |
5069 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5070 young_time_ms += elapsed_ms; | |
5071 | |
5072 start_sec = os::elapsedTime(); | |
5073 non_young = true; | |
5074 } | |
5075 } | |
5076 | |
5077 rs_lengths += cur->rem_set()->occupied(); | |
5078 | |
5079 HeapRegion* next = cur->next_in_collection_set(); | |
5080 assert(cur->in_collection_set(), "bad CS"); | |
5081 cur->set_next_in_collection_set(NULL); | |
5082 cur->set_in_collection_set(false); | |
5083 | |
5084 if (cur->is_young()) { | |
5085 int index = cur->young_index_in_cset(); | |
5086 guarantee( index != -1, "invariant" ); | |
5087 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
5088 size_t words_survived = _surviving_young_words[index]; | |
5089 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5090 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5091 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5092 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5093 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5094 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5095 cur->set_next_young_region(NULL); |
342 | 5096 } else { |
5097 int index = cur->young_index_in_cset(); | |
5098 guarantee( index == -1, "invariant" ); | |
5099 } | |
5100 | |
5101 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
5102 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
5103 "invariant" ); | |
5104 | |
5105 if (!cur->evacuation_failed()) { | |
5106 // And the region is empty. | |
5107 assert(!cur->is_empty(), | |
5108 "Should not have empty regions in a CS."); | |
5109 free_region(cur); | |
5110 } else { | |
5111 cur->uninstall_surv_rate_group(); | |
5112 if (cur->is_young()) | |
5113 cur->set_young_index_in_cset(-1); | |
5114 cur->set_not_young(); | |
5115 cur->set_evacuation_failed(false); | |
5116 } | |
5117 cur = next; | |
5118 } | |
5119 | |
5120 policy->record_max_rs_lengths(rs_lengths); | |
5121 policy->cset_regions_freed(); | |
5122 | |
5123 double end_sec = os::elapsedTime(); | |
5124 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5125 if (non_young) | |
5126 non_young_time_ms += elapsed_ms; | |
5127 else | |
5128 young_time_ms += elapsed_ms; | |
5129 | |
5130 policy->record_young_free_cset_time_ms(young_time_ms); | |
5131 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
5132 } | |
5133 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5134 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5135 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5136 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5137 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5138 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5139 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5140 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5141 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5142 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5143 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5144 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5145 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5146 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5147 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5148 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5149 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5150 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5151 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5152 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5153 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5154 |
342 | 5155 HeapRegion* |
5156 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { | |
5157 assert(ZF_mon->owned_by_self(), "Precondition"); | |
5158 HeapRegion* res = pop_unclean_region_list_locked(); | |
5159 if (res != NULL) { | |
5160 assert(!res->continuesHumongous() && | |
5161 res->zero_fill_state() != HeapRegion::Allocated, | |
5162 "Only free regions on unclean list."); | |
5163 if (zero_filled) { | |
5164 res->ensure_zero_filled_locked(); | |
5165 res->set_zero_fill_allocated(); | |
5166 } | |
5167 } | |
5168 return res; | |
5169 } | |
5170 | |
5171 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { | |
5172 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5173 return alloc_region_from_unclean_list_locked(zero_filled); | |
5174 } | |
5175 | |
5176 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { | |
5177 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5178 put_region_on_unclean_list_locked(r); | |
5179 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
5180 } | |
5181 | |
5182 void G1CollectedHeap::set_unclean_regions_coming(bool b) { | |
5183 MutexLockerEx x(Cleanup_mon); | |
5184 set_unclean_regions_coming_locked(b); | |
5185 } | |
5186 | |
5187 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { | |
5188 assert(Cleanup_mon->owned_by_self(), "Precondition"); | |
5189 _unclean_regions_coming = b; | |
5190 // Wake up mutator threads that might be waiting for completeCleanup to | |
5191 // finish. | |
5192 if (!b) Cleanup_mon->notify_all(); | |
5193 } | |
5194 | |
5195 void G1CollectedHeap::wait_for_cleanup_complete() { | |
1973 | 5196 assert_not_at_safepoint(); |
342 | 5197 MutexLockerEx x(Cleanup_mon); |
5198 wait_for_cleanup_complete_locked(); | |
5199 } | |
5200 | |
5201 void G1CollectedHeap::wait_for_cleanup_complete_locked() { | |
5202 assert(Cleanup_mon->owned_by_self(), "precondition"); | |
5203 while (_unclean_regions_coming) { | |
5204 Cleanup_mon->wait(); | |
5205 } | |
5206 } | |
5207 | |
5208 void | |
5209 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { | |
5210 assert(ZF_mon->owned_by_self(), "precondition."); | |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5211 #ifdef ASSERT |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5212 if (r->is_gc_alloc_region()) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5213 ResourceMark rm; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5214 stringStream region_str; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5215 print_on(®ion_str); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5216 assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s", |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5217 region_str.as_string())); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5218 } |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5219 #endif |
342 | 5220 _unclean_region_list.insert_before_head(r); |
5221 } | |
5222 | |
5223 void | |
5224 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { | |
5225 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5226 prepend_region_list_on_unclean_list_locked(list); | |
5227 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
5228 } | |
5229 | |
5230 void | |
5231 G1CollectedHeap:: | |
5232 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { | |
5233 assert(ZF_mon->owned_by_self(), "precondition."); | |
5234 _unclean_region_list.prepend_list(list); | |
5235 } | |
5236 | |
5237 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { | |
5238 assert(ZF_mon->owned_by_self(), "precondition."); | |
5239 HeapRegion* res = _unclean_region_list.pop(); | |
5240 if (res != NULL) { | |
5241 // Inform ZF thread that there's a new unclean head. | |
5242 if (_unclean_region_list.hd() != NULL && should_zf()) | |
5243 ZF_mon->notify_all(); | |
5244 } | |
5245 return res; | |
5246 } | |
5247 | |
5248 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { | |
5249 assert(ZF_mon->owned_by_self(), "precondition."); | |
5250 return _unclean_region_list.hd(); | |
5251 } | |
5252 | |
5253 | |
5254 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { | |
5255 assert(ZF_mon->owned_by_self(), "Precondition"); | |
5256 HeapRegion* r = peek_unclean_region_list_locked(); | |
5257 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { | |
5258 // Result of below must be equal to "r", since we hold the lock. | |
5259 (void)pop_unclean_region_list_locked(); | |
5260 put_free_region_on_list_locked(r); | |
5261 return true; | |
5262 } else { | |
5263 return false; | |
5264 } | |
5265 } | |
5266 | |
5267 bool G1CollectedHeap::move_cleaned_region_to_free_list() { | |
5268 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5269 return move_cleaned_region_to_free_list_locked(); | |
5270 } | |
5271 | |
5272 | |
5273 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { | |
5274 assert(ZF_mon->owned_by_self(), "precondition."); | |
5275 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5276 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, | |
5277 "Regions on free list must be zero filled"); | |
5278 assert(!r->isHumongous(), "Must not be humongous."); | |
5279 assert(r->is_empty(), "Better be empty"); | |
5280 assert(!r->is_on_free_list(), | |
5281 "Better not already be on free list"); | |
5282 assert(!r->is_on_unclean_list(), | |
5283 "Better not already be on unclean list"); | |
5284 r->set_on_free_list(true); | |
5285 r->set_next_on_free_list(_free_region_list); | |
5286 _free_region_list = r; | |
5287 _free_region_list_size++; | |
5288 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5289 } | |
5290 | |
5291 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { | |
5292 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5293 put_free_region_on_list_locked(r); | |
5294 } | |
5295 | |
5296 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { | |
5297 assert(ZF_mon->owned_by_self(), "precondition."); | |
5298 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5299 HeapRegion* res = _free_region_list; | |
5300 if (res != NULL) { | |
5301 _free_region_list = res->next_from_free_list(); | |
5302 _free_region_list_size--; | |
5303 res->set_on_free_list(false); | |
5304 res->set_next_on_free_list(NULL); | |
5305 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5306 } | |
5307 return res; | |
5308 } | |
5309 | |
5310 | |
5311 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { | |
5312 // By self, or on behalf of self. | |
5313 assert(Heap_lock->is_locked(), "Precondition"); | |
5314 HeapRegion* res = NULL; | |
5315 bool first = true; | |
5316 while (res == NULL) { | |
5317 if (zero_filled || !first) { | |
5318 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5319 res = pop_free_region_list_locked(); | |
5320 if (res != NULL) { | |
5321 assert(!res->zero_fill_is_allocated(), | |
5322 "No allocated regions on free list."); | |
5323 res->set_zero_fill_allocated(); | |
5324 } else if (!first) { | |
5325 break; // We tried both, time to return NULL. | |
5326 } | |
5327 } | |
5328 | |
5329 if (res == NULL) { | |
5330 res = alloc_region_from_unclean_list(zero_filled); | |
5331 } | |
5332 assert(res == NULL || | |
5333 !zero_filled || | |
5334 res->zero_fill_is_allocated(), | |
5335 "We must have allocated the region we're returning"); | |
5336 first = false; | |
5337 } | |
5338 return res; | |
5339 } | |
5340 | |
5341 void G1CollectedHeap::remove_allocated_regions_from_lists() { | |
5342 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5343 { | |
5344 HeapRegion* prev = NULL; | |
5345 HeapRegion* cur = _unclean_region_list.hd(); | |
5346 while (cur != NULL) { | |
5347 HeapRegion* next = cur->next_from_unclean_list(); | |
5348 if (cur->zero_fill_is_allocated()) { | |
5349 // Remove from the list. | |
5350 if (prev == NULL) { | |
5351 (void)_unclean_region_list.pop(); | |
5352 } else { | |
5353 _unclean_region_list.delete_after(prev); | |
5354 } | |
5355 cur->set_on_unclean_list(false); | |
5356 cur->set_next_on_unclean_list(NULL); | |
5357 } else { | |
5358 prev = cur; | |
5359 } | |
5360 cur = next; | |
5361 } | |
5362 assert(_unclean_region_list.sz() == unclean_region_list_length(), | |
5363 "Inv"); | |
5364 } | |
5365 | |
5366 { | |
5367 HeapRegion* prev = NULL; | |
5368 HeapRegion* cur = _free_region_list; | |
5369 while (cur != NULL) { | |
5370 HeapRegion* next = cur->next_from_free_list(); | |
5371 if (cur->zero_fill_is_allocated()) { | |
5372 // Remove from the list. | |
5373 if (prev == NULL) { | |
5374 _free_region_list = cur->next_from_free_list(); | |
5375 } else { | |
5376 prev->set_next_on_free_list(cur->next_from_free_list()); | |
5377 } | |
5378 cur->set_on_free_list(false); | |
5379 cur->set_next_on_free_list(NULL); | |
5380 _free_region_list_size--; | |
5381 } else { | |
5382 prev = cur; | |
5383 } | |
5384 cur = next; | |
5385 } | |
5386 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5387 } | |
5388 } | |
5389 | |
5390 bool G1CollectedHeap::verify_region_lists() { | |
5391 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5392 return verify_region_lists_locked(); | |
5393 } | |
5394 | |
5395 bool G1CollectedHeap::verify_region_lists_locked() { | |
5396 HeapRegion* unclean = _unclean_region_list.hd(); | |
5397 while (unclean != NULL) { | |
5398 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); | |
5399 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); | |
5400 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, | |
5401 "Everything else is possible."); | |
5402 unclean = unclean->next_from_unclean_list(); | |
5403 } | |
5404 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); | |
5405 | |
5406 HeapRegion* free_r = _free_region_list; | |
5407 while (free_r != NULL) { | |
5408 assert(free_r->is_on_free_list(), "Well, it is!"); | |
5409 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); | |
5410 switch (free_r->zero_fill_state()) { | |
5411 case HeapRegion::NotZeroFilled: | |
5412 case HeapRegion::ZeroFilling: | |
5413 guarantee(false, "Should not be on free list."); | |
5414 break; | |
5415 default: | |
5416 // Everything else is possible. | |
5417 break; | |
5418 } | |
5419 free_r = free_r->next_from_free_list(); | |
5420 } | |
5421 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); | |
5422 // If we didn't do an assertion... | |
5423 return true; | |
5424 } | |
5425 | |
5426 size_t G1CollectedHeap::free_region_list_length() { | |
5427 assert(ZF_mon->owned_by_self(), "precondition."); | |
5428 size_t len = 0; | |
5429 HeapRegion* cur = _free_region_list; | |
5430 while (cur != NULL) { | |
5431 len++; | |
5432 cur = cur->next_from_free_list(); | |
5433 } | |
5434 return len; | |
5435 } | |
5436 | |
5437 size_t G1CollectedHeap::unclean_region_list_length() { | |
5438 assert(ZF_mon->owned_by_self(), "precondition."); | |
5439 return _unclean_region_list.length(); | |
5440 } | |
5441 | |
5442 size_t G1CollectedHeap::n_regions() { | |
5443 return _hrs->length(); | |
5444 } | |
5445 | |
5446 size_t G1CollectedHeap::max_regions() { | |
5447 return | |
5448 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / | |
5449 HeapRegion::GrainBytes; | |
5450 } | |
5451 | |
5452 size_t G1CollectedHeap::free_regions() { | |
5453 /* Possibly-expensive assert. | |
5454 assert(_free_regions == count_free_regions(), | |
5455 "_free_regions is off."); | |
5456 */ | |
5457 return _free_regions; | |
5458 } | |
5459 | |
5460 bool G1CollectedHeap::should_zf() { | |
5461 return _free_region_list_size < (size_t) G1ConcZFMaxRegions; | |
5462 } | |
5463 | |
5464 class RegionCounter: public HeapRegionClosure { | |
5465 size_t _n; | |
5466 public: | |
5467 RegionCounter() : _n(0) {} | |
5468 bool doHeapRegion(HeapRegion* r) { | |
677 | 5469 if (r->is_empty()) { |
342 | 5470 assert(!r->isHumongous(), "H regions should not be empty."); |
5471 _n++; | |
5472 } | |
5473 return false; | |
5474 } | |
5475 int res() { return (int) _n; } | |
5476 }; | |
5477 | |
5478 size_t G1CollectedHeap::count_free_regions() { | |
5479 RegionCounter rc; | |
5480 heap_region_iterate(&rc); | |
5481 size_t n = rc.res(); | |
5482 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) | |
5483 n--; | |
5484 return n; | |
5485 } | |
5486 | |
5487 size_t G1CollectedHeap::count_free_regions_list() { | |
5488 size_t n = 0; | |
5489 size_t o = 0; | |
5490 ZF_mon->lock_without_safepoint_check(); | |
5491 HeapRegion* cur = _free_region_list; | |
5492 while (cur != NULL) { | |
5493 cur = cur->next_from_free_list(); | |
5494 n++; | |
5495 } | |
5496 size_t m = unclean_region_list_length(); | |
5497 ZF_mon->unlock(); | |
5498 return n + m; | |
5499 } | |
5500 | |
5501 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
5502 assert(heap_lock_held_for_gc(), | |
5503 "the heap lock should already be held by or for this thread"); | |
5504 _young_list->push_region(hr); | |
5505 g1_policy()->set_region_short_lived(hr); | |
5506 } | |
5507 | |
5508 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5509 private: | |
5510 bool _success; | |
5511 public: | |
5512 NoYoungRegionsClosure() : _success(true) { } | |
5513 bool doHeapRegion(HeapRegion* r) { | |
5514 if (r->is_young()) { | |
5515 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5516 r->bottom(), r->end()); | |
5517 _success = false; | |
5518 } | |
5519 return false; | |
5520 } | |
5521 bool success() { return _success; } | |
5522 }; | |
5523 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5524 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5525 bool ret = _young_list->check_list_empty(check_sample); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5526 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5527 if (check_heap) { |
342 | 5528 NoYoungRegionsClosure closure; |
5529 heap_region_iterate(&closure); | |
5530 ret = ret && closure.success(); | |
5531 } | |
5532 | |
5533 return ret; | |
5534 } | |
5535 | |
5536 void G1CollectedHeap::empty_young_list() { | |
5537 assert(heap_lock_held_for_gc(), | |
5538 "the heap lock should already be held by or for this thread"); | |
5539 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
5540 | |
5541 _young_list->empty_list(); | |
5542 } | |
5543 | |
5544 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5545 bool no_allocs = true; | |
5546 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5547 HeapRegion* r = _gc_alloc_regions[ap]; | |
5548 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5549 } | |
5550 return no_allocs; | |
5551 } | |
5552 | |
545 | 5553 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5554 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5555 HeapRegion* r = _gc_alloc_regions[ap]; | |
5556 if (r != NULL) { | |
5557 // Check for aliases. | |
5558 bool has_processed_alias = false; | |
5559 for (int i = 0; i < ap; ++i) { | |
5560 if (_gc_alloc_regions[i] == r) { | |
5561 has_processed_alias = true; | |
5562 break; | |
5563 } | |
5564 } | |
5565 if (!has_processed_alias) { | |
545 | 5566 retire_alloc_region(r, false /* par */); |
342 | 5567 } |
5568 } | |
5569 } | |
5570 } | |
5571 | |
5572 | |
5573 // Done at the start of full GC. | |
5574 void G1CollectedHeap::tear_down_region_lists() { | |
5575 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5576 while (pop_unclean_region_list_locked() != NULL) ; | |
5577 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, | |
1489
cff162798819
6888953: some calls to function-like macros are missing semicolons
jcoomes
parents:
1394
diff
changeset
|
5578 "Postconditions of loop."); |
342 | 5579 while (pop_free_region_list_locked() != NULL) ; |
5580 assert(_free_region_list == NULL, "Postcondition of loop."); | |
5581 if (_free_region_list_size != 0) { | |
5582 gclog_or_tty->print_cr("Size is %d.", _free_region_list_size); | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
5583 print_on(gclog_or_tty, true /* extended */); |
342 | 5584 } |
5585 assert(_free_region_list_size == 0, "Postconditions of loop."); | |
5586 } | |
5587 | |
5588 | |
5589 class RegionResetter: public HeapRegionClosure { | |
5590 G1CollectedHeap* _g1; | |
5591 int _n; | |
5592 public: | |
5593 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5594 bool doHeapRegion(HeapRegion* r) { | |
5595 if (r->continuesHumongous()) return false; | |
5596 if (r->top() > r->bottom()) { | |
5597 if (r->top() < r->end()) { | |
5598 Copy::fill_to_words(r->top(), | |
5599 pointer_delta(r->end(), r->top())); | |
5600 } | |
5601 r->set_zero_fill_allocated(); | |
5602 } else { | |
5603 assert(r->is_empty(), "tautology"); | |
677 | 5604 _n++; |
5605 switch (r->zero_fill_state()) { | |
342 | 5606 case HeapRegion::NotZeroFilled: |
5607 case HeapRegion::ZeroFilling: | |
5608 _g1->put_region_on_unclean_list_locked(r); | |
5609 break; | |
5610 case HeapRegion::Allocated: | |
5611 r->set_zero_fill_complete(); | |
5612 // no break; go on to put on free list. | |
5613 case HeapRegion::ZeroFilled: | |
5614 _g1->put_free_region_on_list_locked(r); | |
5615 break; | |
5616 } | |
5617 } | |
5618 return false; | |
5619 } | |
5620 | |
5621 int getFreeRegionCount() {return _n;} | |
5622 }; | |
5623 | |
5624 // Done at the end of full GC. | |
5625 void G1CollectedHeap::rebuild_region_lists() { | |
5626 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5627 // This needs to go at the end of the full GC. | |
5628 RegionResetter rs; | |
5629 heap_region_iterate(&rs); | |
5630 _free_regions = rs.getFreeRegionCount(); | |
5631 // Tell the ZF thread it may have work to do. | |
5632 if (should_zf()) ZF_mon->notify_all(); | |
5633 } | |
5634 | |
5635 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { | |
5636 G1CollectedHeap* _g1; | |
5637 int _n; | |
5638 public: | |
5639 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5640 bool doHeapRegion(HeapRegion* r) { | |
5641 if (r->continuesHumongous()) return false; | |
5642 if (r->top() > r->bottom()) { | |
5643 // There are assertions in "set_zero_fill_needed()" below that | |
5644 // require top() == bottom(), so this is technically illegal. | |
5645 // We'll skirt the law here, by making that true temporarily. | |
5646 DEBUG_ONLY(HeapWord* save_top = r->top(); | |
5647 r->set_top(r->bottom())); | |
5648 r->set_zero_fill_needed(); | |
5649 DEBUG_ONLY(r->set_top(save_top)); | |
5650 } | |
5651 return false; | |
5652 } | |
5653 }; | |
5654 | |
5655 // Done at the start of full GC. | |
5656 void G1CollectedHeap::set_used_regions_to_need_zero_fill() { | |
5657 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5658 // This needs to go at the end of the full GC. | |
5659 UsedRegionsNeedZeroFillSetter rs; | |
5660 heap_region_iterate(&rs); | |
5661 } | |
5662 | |
5663 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
5664 _refine_cte_cl->set_concurrent(concurrent); | |
5665 } | |
5666 | |
#ifndef PRODUCT
5668 | |
5669 class PrintHeapRegionClosure: public HeapRegionClosure { | |
5670 public: | |
5671 bool doHeapRegion(HeapRegion *r) { | |
5672 gclog_or_tty->print("Region: "PTR_FORMAT":", r); | |
5673 if (r != NULL) { | |
5674 if (r->is_on_free_list()) | |
5675 gclog_or_tty->print("Free "); | |
5676 if (r->is_young()) | |
5677 gclog_or_tty->print("Young "); | |
5678 if (r->isHumongous()) | |
5679 gclog_or_tty->print("Is Humongous "); | |
5680 r->print(); | |
5681 } | |
5682 return false; | |
5683 } | |
5684 }; | |
5685 | |
5686 class SortHeapRegionClosure : public HeapRegionClosure { | |
5687 size_t young_regions,free_regions, unclean_regions; | |
5688 size_t hum_regions, count; | |
5689 size_t unaccounted, cur_unclean, cur_alloc; | |
5690 size_t total_free; | |
5691 HeapRegion* cur; | |
5692 public: | |
5693 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), | |
5694 free_regions(0), unclean_regions(0), | |
5695 hum_regions(0), | |
5696 count(0), unaccounted(0), | |
5697 cur_alloc(0), total_free(0) | |
5698 {} | |
5699 bool doHeapRegion(HeapRegion *r) { | |
5700 count++; | |
5701 if (r->is_on_free_list()) free_regions++; | |
5702 else if (r->is_on_unclean_list()) unclean_regions++; | |
5703 else if (r->isHumongous()) hum_regions++; | |
5704 else if (r->is_young()) young_regions++; | |
5705 else if (r == cur) cur_alloc++; | |
5706 else unaccounted++; | |
5707 return false; | |
5708 } | |
5709 void print() { | |
5710 total_free = free_regions + unclean_regions; | |
5711 gclog_or_tty->print("%d regions\n", count); | |
5712 gclog_or_tty->print("%d free: free_list = %d unclean = %d\n", | |
5713 total_free, free_regions, unclean_regions); | |
5714 gclog_or_tty->print("%d humongous %d young\n", | |
5715 hum_regions, young_regions); | |
5716 gclog_or_tty->print("%d cur_alloc\n", cur_alloc); | |
5717 gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted); | |
5718 } | |
5719 }; | |
5720 | |
5721 void G1CollectedHeap::print_region_counts() { | |
5722 SortHeapRegionClosure sc(_cur_alloc_region); | |
5723 PrintHeapRegionClosure cl; | |
5724 heap_region_iterate(&cl); | |
5725 heap_region_iterate(&sc); | |
5726 sc.print(); | |
5727 print_region_accounting_info(); | |
5728 }; | |
5729 | |
5730 bool G1CollectedHeap::regions_accounted_for() { | |
5731 // TODO: regions accounting for young/survivor/tenured | |
5732 return true; | |
5733 } | |
5734 | |
5735 bool G1CollectedHeap::print_region_accounting_info() { | |
5736 gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).", | |
5737 free_regions(), | |
5738 count_free_regions(), count_free_regions_list(), | |
5739 _free_region_list_size, _unclean_region_list.sz()); | |
5740 gclog_or_tty->print_cr("cur_alloc: %d.", | |
5741 (_cur_alloc_region == NULL ? 0 : 1)); | |
5742 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); | |
5743 | |
5744 // TODO: check regions accounting for young/survivor/tenured | |
5745 return true; | |
5746 } | |
5747 | |
5748 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5749 HeapRegion* hr = heap_region_containing(p); | |
5750 if (hr == NULL) { | |
5751 return is_in_permanent(p); | |
5752 } else { | |
5753 return hr->is_in(p); | |
5754 } | |
5755 } | |
#endif // !PRODUCT
342 | 5757 |
5758 void G1CollectedHeap::g1_unimplemented() { | |
5759 // Unimplemented(); | |
5760 } |