Mercurial > hg > graal-compiler
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 3777:e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
Summary: Remove two unnecessary iterations over the collection set which are supposed to prepare the RSet's of the CSet regions for parallel iterations (we'll make sure this is done incrementally). I'll piggyback on this CR the removal of the G1_REM_SET_LOGGING code.
Reviewed-by: brutisso, johnc
author | tonyp |
---|---|
date | Tue, 21 Jun 2011 15:23:07 -0400 |
parents | c9ca3f51cf41 |
children | 5f6f2615433a |
rev | line source |
---|---|
342 | 1 /* |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "code/icBuffer.hpp" | |
27 #include "gc_implementation/g1/bufferingOopClosure.hpp" | |
28 #include "gc_implementation/g1/concurrentG1Refine.hpp" | |
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" | |
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" |
1972 | 32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | |
34 #include "gc_implementation/g1/g1MarkSweep.hpp" | |
35 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | |
36 #include "gc_implementation/g1/g1RemSet.inline.hpp" | |
37 #include "gc_implementation/g1/heapRegionRemSet.hpp" | |
38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | |
39 #include "gc_implementation/g1/vm_operations_g1.hpp" | |
40 #include "gc_implementation/shared/isGCActiveMark.hpp" | |
41 #include "memory/gcLocker.inline.hpp" | |
42 #include "memory/genOopClosures.inline.hpp" | |
43 #include "memory/generationSpec.hpp" | |
44 #include "oops/oop.inline.hpp" | |
45 #include "oops/oop.pcgc.inline.hpp" | |
46 #include "runtime/aprofiler.hpp" | |
47 #include "runtime/vmThread.hpp" | |
342 | 48 |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
49 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
50 |
342 | 51 // turn it on so that the contents of the young list (scan-only / |
52 // to-be-collected) are printed at "strategic" points before / during | |
53 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
54 #define YOUNG_LIST_VERBOSE 0 |
342 | 55 // CURRENT STATUS |
56 // This file is under construction. Search for "FIXME". | |
57 | |
58 // INVARIANTS/NOTES | |
59 // | |
60 // All allocation activity covered by the G1CollectedHeap interface is | |
1973 | 61 // serialized by acquiring the HeapLock. This happens in mem_allocate |
62 // and allocate_new_tlab, which are the "entry" points to the | |
63 // allocation code from the rest of the JVM. (Note that this does not | |
64 // apply to TLAB allocation, which is not part of this interface: it | |
65 // is done by clients of this interface.) | |
342 | 66 |
67 // Local to this file. | |
68 | |
69 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
70 SuspendibleThreadSet* _sts; | |
71 G1RemSet* _g1rs; | |
72 ConcurrentG1Refine* _cg1r; | |
73 bool _concurrent; | |
74 public: | |
75 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
76 G1RemSet* g1rs, | |
77 ConcurrentG1Refine* cg1r) : | |
78 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
79 {} | |
80 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
1705 | 81 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); |
82 // This path is executed by the concurrent refine or mutator threads, | |
83 // concurrently, and so we do not care if card_ptr contains references | |
84 // that point into the collection set. | |
85 assert(!oops_into_cset, "should be"); | |
86 | |
342 | 87 if (_concurrent && _sts->should_yield()) { |
88 // Caller will actually yield. | |
89 return false; | |
90 } | |
91 // Otherwise, we finished successfully; return true. | |
92 return true; | |
93 } | |
94 void set_concurrent(bool b) { _concurrent = b; } | |
95 }; | |
96 | |
97 | |
98 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
99 int _calls; | |
100 G1CollectedHeap* _g1h; | |
101 CardTableModRefBS* _ctbs; | |
102 int _histo[256]; | |
103 public: | |
104 ClearLoggedCardTableEntryClosure() : | |
105 _calls(0) | |
106 { | |
107 _g1h = G1CollectedHeap::heap(); | |
108 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
109 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
110 } | |
111 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
112 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
113 _calls++; | |
114 unsigned char* ujb = (unsigned char*)card_ptr; | |
115 int ind = (int)(*ujb); | |
116 _histo[ind]++; | |
117 *card_ptr = -1; | |
118 } | |
119 return true; | |
120 } | |
121 int calls() { return _calls; } | |
122 void print_histo() { | |
123 gclog_or_tty->print_cr("Card table value histogram:"); | |
124 for (int i = 0; i < 256; i++) { | |
125 if (_histo[i] != 0) { | |
126 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
127 } | |
128 } | |
129 } | |
130 }; | |
131 | |
132 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
133 int _calls; | |
134 G1CollectedHeap* _g1h; | |
135 CardTableModRefBS* _ctbs; | |
136 public: | |
137 RedirtyLoggedCardTableEntryClosure() : | |
138 _calls(0) | |
139 { | |
140 _g1h = G1CollectedHeap::heap(); | |
141 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
142 } | |
143 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
144 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
145 _calls++; | |
146 *card_ptr = 0; | |
147 } | |
148 return true; | |
149 } | |
150 int calls() { return _calls; } | |
151 }; | |
152 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
153 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
154 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
155 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
156 *card_ptr = CardTableModRefBS::dirty_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
157 return true; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
158 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
159 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
160 |
342 | 161 YoungList::YoungList(G1CollectedHeap* g1h) |
162 : _g1h(g1h), _head(NULL), | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
163 _length(0), |
342 | 164 _last_sampled_rs_lengths(0), |
545 | 165 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
342 | 166 { |
167 guarantee( check_list_empty(false), "just making sure..." ); | |
168 } | |
169 | |
170 void YoungList::push_region(HeapRegion *hr) { | |
171 assert(!hr->is_young(), "should not already be young"); | |
172 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
173 | |
174 hr->set_next_young_region(_head); | |
175 _head = hr; | |
176 | |
177 hr->set_young(); | |
178 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
179 ++_length; | |
180 } | |
181 | |
182 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 183 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 184 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
185 | |
186 hr->set_next_young_region(_survivor_head); | |
187 if (_survivor_head == NULL) { | |
545 | 188 _survivor_tail = hr; |
342 | 189 } |
190 _survivor_head = hr; | |
191 | |
192 ++_survivor_length; | |
193 } | |
194 | |
195 void YoungList::empty_list(HeapRegion* list) { | |
196 while (list != NULL) { | |
197 HeapRegion* next = list->get_next_young_region(); | |
198 list->set_next_young_region(NULL); | |
199 list->uninstall_surv_rate_group(); | |
200 list->set_not_young(); | |
201 list = next; | |
202 } | |
203 } | |
204 | |
205 void YoungList::empty_list() { | |
206 assert(check_list_well_formed(), "young list should be well formed"); | |
207 | |
208 empty_list(_head); | |
209 _head = NULL; | |
210 _length = 0; | |
211 | |
212 empty_list(_survivor_head); | |
213 _survivor_head = NULL; | |
545 | 214 _survivor_tail = NULL; |
342 | 215 _survivor_length = 0; |
216 | |
217 _last_sampled_rs_lengths = 0; | |
218 | |
219 assert(check_list_empty(false), "just making sure..."); | |
220 } | |
221 | |
222 bool YoungList::check_list_well_formed() { | |
223 bool ret = true; | |
224 | |
225 size_t length = 0; | |
226 HeapRegion* curr = _head; | |
227 HeapRegion* last = NULL; | |
228 while (curr != NULL) { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
229 if (!curr->is_young()) { |
342 | 230 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
231 "incorrectly tagged (y: %d, surv: %d)", |
342 | 232 curr->bottom(), curr->end(), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
233 curr->is_young(), curr->is_survivor()); |
342 | 234 ret = false; |
235 } | |
236 ++length; | |
237 last = curr; | |
238 curr = curr->get_next_young_region(); | |
239 } | |
240 ret = ret && (length == _length); | |
241 | |
242 if (!ret) { | |
243 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
244 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
245 length, _length); | |
246 } | |
247 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
248 return ret; |
342 | 249 } |
250 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
251 bool YoungList::check_list_empty(bool check_sample) { |
342 | 252 bool ret = true; |
253 | |
254 if (_length != 0) { | |
255 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
256 _length); | |
257 ret = false; | |
258 } | |
259 if (check_sample && _last_sampled_rs_lengths != 0) { | |
260 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
261 ret = false; | |
262 } | |
263 if (_head != NULL) { | |
264 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
265 ret = false; | |
266 } | |
267 if (!ret) { | |
268 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
269 } | |
270 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
271 return ret; |
342 | 272 } |
273 | |
274 void | |
275 YoungList::rs_length_sampling_init() { | |
276 _sampled_rs_lengths = 0; | |
277 _curr = _head; | |
278 } | |
279 | |
280 bool | |
281 YoungList::rs_length_sampling_more() { | |
282 return _curr != NULL; | |
283 } | |
284 | |
285 void | |
286 YoungList::rs_length_sampling_next() { | |
287 assert( _curr != NULL, "invariant" ); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
288 size_t rs_length = _curr->rem_set()->occupied(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
289 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
290 _sampled_rs_lengths += rs_length; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
291 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
292 // The current region may not yet have been added to the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
293 // incremental collection set (it gets added when it is |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
294 // retired as the current allocation region). |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
295 if (_curr->in_collection_set()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
296 // Update the collection set policy information for this region |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
297 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
298 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
299 |
342 | 300 _curr = _curr->get_next_young_region(); |
301 if (_curr == NULL) { | |
302 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
303 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
304 } | |
305 } | |
306 | |
307 void | |
308 YoungList::reset_auxilary_lists() { | |
309 guarantee( is_empty(), "young list should be empty" ); | |
310 assert(check_list_well_formed(), "young list should be well formed"); | |
311 | |
312 // Add survivor regions to SurvRateGroup. | |
313 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
545 | 314 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
315 |
342 | 316 for (HeapRegion* curr = _survivor_head; |
317 curr != NULL; | |
318 curr = curr->get_next_young_region()) { | |
319 _g1h->g1_policy()->set_region_survivors(curr); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
320 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
321 // The region is a non-empty survivor so let's add it to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
322 // the incremental collection set for the next evacuation |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
323 // pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
324 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); |
342 | 325 } |
326 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
327 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
328 _head = _survivor_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
329 _length = _survivor_length; |
342 | 330 if (_survivor_head != NULL) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
331 assert(_survivor_tail != NULL, "cause it shouldn't be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
332 assert(_survivor_length > 0, "invariant"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
333 _survivor_tail->set_next_young_region(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
334 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
335 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
336 // Don't clear the survivor list handles until the start of |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
337 // the next evacuation pause - we need it in order to re-tag |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
338 // the survivor regions from this evacuation pause as 'young' |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
339 // at the start of the next. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
340 |
545 | 341 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
342 | 342 |
343 assert(check_list_well_formed(), "young list should be well formed"); | |
344 } | |
345 | |
346 void YoungList::print() { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
347 HeapRegion* lists[] = {_head, _survivor_head}; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
348 const char* names[] = {"YOUNG", "SURVIVOR"}; |
342 | 349 |
350 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
351 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
352 HeapRegion *curr = lists[list]; | |
353 if (curr == NULL) | |
354 gclog_or_tty->print_cr(" empty"); | |
355 while (curr != NULL) { | |
356 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
357 "age: %4d, y: %d, surv: %d", |
342 | 358 curr->bottom(), curr->end(), |
359 curr->top(), | |
360 curr->prev_top_at_mark_start(), | |
361 curr->next_top_at_mark_start(), | |
362 curr->top_at_conc_mark_count(), | |
363 curr->age_in_surv_rate_group_cond(), | |
364 curr->is_young(), | |
365 curr->is_survivor()); | |
366 curr = curr->get_next_young_region(); | |
367 } | |
368 } | |
369 | |
370 gclog_or_tty->print_cr(""); | |
371 } | |
372 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
373 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
374 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
375 // Claim the right to put the region on the dirty cards region list |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
376 // by installing a self pointer. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
377 HeapRegion* next = hr->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
378 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
379 HeapRegion* res = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
380 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
381 NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
382 if (res == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
383 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
384 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
385 // Put the region to the dirty cards region list. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
386 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
387 next = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
388 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
389 if (next == head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
390 assert(hr->get_next_dirty_cards_region() == hr, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
391 "hr->get_next_dirty_cards_region() != hr"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
392 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
393 // The last region in the list points to itself. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
394 hr->set_next_dirty_cards_region(hr); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
395 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
396 hr->set_next_dirty_cards_region(next); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
397 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
398 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
399 } while (next != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
400 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
401 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
402 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
403 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
404 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
405 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
406 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
407 HeapRegion* hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
408 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
409 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
410 if (head == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
411 return NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
412 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
413 HeapRegion* new_head = head->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
414 if (head == new_head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
415 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
416 new_head = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
417 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
418 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
419 head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
420 } while (hr != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
421 assert(hr != NULL, "invariant"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
422 hr->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
423 return hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
424 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
425 |
342 | 426 void G1CollectedHeap::stop_conc_gc_threads() { |
794 | 427 _cg1r->stop(); |
342 | 428 _cmThread->stop(); |
429 } | |
430 | |
3377
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
#ifdef ASSERT
// A region is added to the collection set as it is retired
// so an address p can point to a region which will be in the
// collection set but has not yet been retired. This method
// therefore is only accurate during a GC pause after all
// regions have been retired. It is used for debugging
// to check if an nmethod has references to objects that can
// be move during a partial collection. Though it can be
// inaccurate, it is sufficient for G1 because the conservative
// implementation of is_scavengable() for G1 will indicate that
// all nmethods must be scanned during a partial collection.
bool G1CollectedHeap::is_in_partial_collection(const void* p) {
  HeapRegion* hr = heap_region_containing(p);
  return hr != NULL && hr->in_collection_set();
}
#endif
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
447 |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
448 // Returns true if the reference points to an object that |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
449 // can move in an incremental collecction. |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
450 bool G1CollectedHeap::is_scavengable(const void* p) { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
451 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
452 G1CollectorPolicy* g1p = g1h->g1_policy(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
453 HeapRegion* hr = heap_region_containing(p); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
454 if (hr == NULL) { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
455 // perm gen (or null) |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
456 return false; |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
457 } else { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
458 return !hr->isHumongous(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
459 } |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
460 } |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
461 |
// Consistency check of the dirty card queue (DCQ) machinery, run at a
// safepoint: it clears every logged card-table entry, verifies the card
// table is then completely clean, re-dirties the logged entries, and
// checks that the redirty/clear call counts agree and that at least the
// original number of dirty cards was restored.
void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there's no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  // Re-dirty every card that was logged, via the same buffer walk.
  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  // The card table may legitimately contain MORE dirty cards than at the
  // start (mutators may have dirtied new ones), hence >= and not ==.
  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  // Restore the normal refinement closure so concurrent refinement
  // resumes its usual behavior after this check.
  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}
506 | |
// Private class members.

// The one-and-only G1CollectedHeap instance; presumably set during heap
// initialization — TODO confirm against the constructor (not visible here).
G1CollectedHeap* G1CollectedHeap::_g1h;

// Private methods.
// Try to allocate a region from the secondary free list (regions freed
// concurrently, e.g. by cleanup). Blocks on SecondaryFreeList_lock and
// waits while more free regions may still be coming. Returns NULL if no
// region could be obtained.
HeapRegion*
G1CollectedHeap::new_region_try_secondary_free_list() {
  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
    if (!_secondary_free_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "secondary_free_list has "SIZE_FORMAT" entries",
                               _secondary_free_list.length());
      }
      // It looks as if there are free regions available on the
      // secondary_free_list. Let's move them to the free_list and try
      // again to allocate from it.
      append_secondary_free_list();

      assert(!_free_list.is_empty(), "if the secondary_free_list was not "
             "empty we should have moved at least one entry to the free_list");
      HeapRegion* res = _free_list.remove_head();
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "allocated "HR_FORMAT" from secondary_free_list",
                               HR_FORMAT_PARAMS(res));
      }
      return res;
    }

    // Wait here until we get notified either when (a) there are no
    // more free regions coming or (b) some regions have been moved on
    // the secondary_free_list.
    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                           "could not allocate from secondary_free_list");
  }
  return NULL;
}
551 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
552 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) { |
2152 | 553 assert(!isHumongous(word_size) || |
554 word_size <= (size_t) HeapRegion::GrainWords, | |
555 "the only time we use this to allocate a humongous region is " | |
556 "when we are allocating a single humongous region"); | |
557 | |
558 HeapRegion* res; | |
559 if (G1StressConcRegionFreeing) { | |
560 if (!_secondary_free_list.is_empty()) { | |
561 if (G1ConcRegionFreeingVerbose) { | |
562 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
563 "forced to look at the secondary_free_list"); | |
564 } | |
2361 | 565 res = new_region_try_secondary_free_list(); |
2152 | 566 if (res != NULL) { |
567 return res; | |
568 } | |
569 } | |
570 } | |
571 res = _free_list.remove_head_or_null(); | |
572 if (res == NULL) { | |
573 if (G1ConcRegionFreeingVerbose) { | |
574 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
575 "res == NULL, trying the secondary_free_list"); | |
576 } | |
2361 | 577 res = new_region_try_secondary_free_list(); |
2152 | 578 } |
342 | 579 if (res == NULL && do_expand) { |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
580 if (expand(word_size * HeapWordSize)) { |
3766 | 581 // Even though the heap was expanded, it might not have reached |
582 // the desired size. So, we cannot assume that the allocation | |
583 // will succeed. | |
584 res = _free_list.remove_head_or_null(); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
585 } |
342 | 586 } |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
587 if (res != NULL) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
588 if (G1PrintHeapRegions) { |
3766 | 589 gclog_or_tty->print_cr("new alloc region "HR_FORMAT, |
590 HR_FORMAT_PARAMS(res)); | |
342 | 591 } |
592 } | |
593 return res; | |
594 } | |
595 | |
2152 | 596 HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose, |
597 size_t word_size) { | |
342 | 598 HeapRegion* alloc_region = NULL; |
599 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
600 alloc_region = new_region(word_size, true /* do_expand */); |
342 | 601 if (purpose == GCAllocForSurvived && alloc_region != NULL) { |
545 | 602 alloc_region->set_survivor(); |
342 | 603 } |
604 ++_gc_alloc_region_counts[purpose]; | |
605 } else { | |
606 g1_policy()->note_alloc_region_limit_reached(purpose); | |
607 } | |
608 return alloc_region; | |
609 } | |
610 | |
// Find the index of the first region of a contiguous run of num_regions
// free regions suitable for a humongous allocation of word_size words.
// Returns G1_NULL_HRS_INDEX on failure. On success the regions are
// removed from the master free list but not yet initialized.
size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
                                                          size_t word_size) {
  assert(isHumongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  size_t first = G1_NULL_HRS_INDEX;
  if (num_regions == 1) {
    // Only one region to allocate, no need to go through the slower
    // path. The caller will attempt the expansion if this fails, so
    // let's not try to expand here too.
    HeapRegion* hr = new_region(word_size, false /* do_expand */);
    if (hr != NULL) {
      first = hr->hrs_index();
    } else {
      first = G1_NULL_HRS_INDEX;
    }
  } else {
    // We can't allocate humongous regions while cleanupComplete() is
    // running, since some of the regions we find to be empty might not
    // yet be added to the free list and it is not straightforward to
    // know which list they are on so that we can remove them. Note
    // that we only need to do this if we need to allocate more than
    // one region to satisfy the current humongous allocation
    // request. If we are only allocating one region we use the common
    // region allocation code (see above).
    wait_while_free_regions_coming();
    append_secondary_free_list_if_not_empty_with_lock();

    if (free_regions() >= num_regions) {
      first = _hrs.find_contiguous(num_regions);
      if (first != G1_NULL_HRS_INDEX) {
        // Mark each region of the run for removal, then take them all
        // off the master free list in one operation.
        for (size_t i = first; i < first + num_regions; ++i) {
          HeapRegion* hr = region_at(i);
          assert(hr->is_empty(), "sanity");
          assert(is_on_master_free_list(hr), "sanity");
          hr->set_pending_removal(true);
        }
        _free_list.remove_all_pending(num_regions);
      }
    }
  }
  return first;
}
654 | |
// Initialize the run of regions [first, first + num_regions) for a
// humongous object of word_size words and return the address of the new
// object's header. The setup order below is load-bearing: concurrent
// refinement threads may be scanning these regions while we work.
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
                                                           size_t num_regions,
                                                           size_t word_size) {
  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
  assert(isHumongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series + 1.
  size_t last = first + num_regions;

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // refinement threads refining cards on these regions and
  // potentially wanting to refine the BOT as they are scanning
  // those cards (this can happen shortly after a cleanup; see CR
  // 6991377). So we have to set up the region(s) carefully and in
  // a specific order.

  // The word size sum of all the regions we will allocate.
  size_t word_size_sum = num_regions * HeapRegion::GrainWords;
  assert(word_size <= word_size_sum, "sanity");

  // This will be the "starts humongous" region.
  HeapRegion* first_hr = region_at(first);
  // The header of the new object will be placed at the bottom of
  // the first region.
  HeapWord* new_obj = first_hr->bottom();
  // This will be the new end of the first region in the series that
  // should also match the end of the last region in the series.
  HeapWord* new_end = new_obj + word_size_sum;
  // This will be the new top of the first region that will reflect
  // this allocation.
  HeapWord* new_top = new_obj + word_size;

  // First, we need to zero the header of the space that we will be
  // allocating. When we update top further down, some refinement
  // threads might try to scan the region. By zeroing the header we
  // ensure that any thread that will try to scan the region will
  // come across the zero klass word and bail out.
  //
  // NOTE: It would not have been correct to have used
  // CollectedHeap::fill_with_object() and make the space look like
  // an int array. The thread that is doing the allocation will
  // later update the object header to a potentially different array
  // type and, for a very short period of time, the klass and length
  // fields will be inconsistent. This could cause a refinement
  // thread to calculate the object size incorrectly.
  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

  // We will set up the first region as "starts humongous". This
  // will also update the BOT covering all the regions to reflect
  // that there is a single object that starts at the bottom of the
  // first region.
  first_hr->set_startsHumongous(new_top, new_end);

  // Then, if there are any, we will set up the "continues
  // humongous" regions.
  HeapRegion* hr = NULL;
  for (size_t i = first + 1; i < last; ++i) {
    hr = region_at(i);
    hr->set_continuesHumongous(first_hr);
  }
  // If we have "continues humongous" regions (hr != NULL), then the
  // end of the last one should match new_end.
  assert(hr == NULL || hr->end() == new_end, "sanity");

  // Up to this point no concurrent thread would have been able to
  // do any scanning on any region in this series. All the top
  // fields still point to bottom, so the intersection between
  // [bottom,top] and [card_start,card_end] will be empty. Before we
  // update the top fields, we'll do a storestore to make sure that
  // no thread sees the update to top before the zeroing of the
  // object header and the BOT initialization.
  OrderAccess::storestore();

  // Now that the BOT and the object header have been initialized,
  // we can update top of the "starts humongous" region.
  assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
         "new_top should be in this region");
  first_hr->set_top(new_top);

  // Now, we will update the top fields of the "continues humongous"
  // regions. The reason we need to do this is that, otherwise, these
  // regions would look empty and this will confuse parts of
  // G1. For example, the code that looks for a consecutive number
  // of empty regions will consider them empty and try to
  // re-allocate them. We can extend is_empty() to also include
  // !continuesHumongous(), but it is easier to just update the top
  // fields here. The way we set top for all regions (i.e., top ==
  // end for all regions but the last one, top == new_top for the
  // last one) is actually used when we will free up the humongous
  // region in free_humongous_region().
  hr = NULL;
  for (size_t i = first + 1; i < last; ++i) {
    hr = region_at(i);
    if ((i + 1) == last) {
      // last continues humongous region
      assert(hr->bottom() < new_top && new_top <= hr->end(),
             "new_top should fall on this region");
      hr->set_top(new_top);
    } else {
      // not last one
      assert(new_top > hr->end(), "new_top should be above this region");
      hr->set_top(hr->end());
    }
  }
  // If we have continues humongous regions (hr != NULL), then the
  // end of the last one should match new_end and its top should
  // match new_top.
  assert(hr == NULL ||
         (hr->end() == new_end && hr->top() == new_top), "sanity");

  // Account the object and register the first region with the
  // humongous region set.
  assert(first_hr->used() == word_size * HeapWordSize, "invariant");
  _summary_bytes_used += first_hr->used();
  _humongous_set.add(first_hr);

  return new_obj;
}
774 | |
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
//
// Top-level humongous allocation: finds (or creates, by expanding the
// heap) a contiguous run of regions for word_size words and initializes
// them. Returns NULL on failure. Must hold the heap lock or be at a
// safepoint on the VM thread.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  verify_region_sets_optional();

  // Number of whole regions needed to cover word_size.
  size_t num_regions =
         round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
  size_t x_size = expansion_regions();
  size_t fs = _hrs.free_suffix();
  size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
  if (first == G1_NULL_HRS_INDEX) {
    // The only thing we can do now is attempt expansion.
    if (fs + x_size >= num_regions) {
      // If the number of regions we're trying to allocate for this
      // object is at most the number of regions in the free suffix,
      // then the call to humongous_obj_allocate_find_first() above
      // should have succeeded and we wouldn't be here.
      //
      // We should only be trying to expand when the free suffix is
      // not sufficient for the object _and_ we have some expansion
      // room available.
      assert(num_regions > fs, "earlier allocation should have succeeded");

      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
        // Even though the heap was expanded, it might not have
        // reached the desired size. So, we cannot assume that the
        // allocation will succeed.
        first = humongous_obj_allocate_find_first(num_regions, word_size);
      }
    }
  }

  HeapWord* result = NULL;
  if (first != G1_NULL_HRS_INDEX) {
    result =
      humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
    assert(result != NULL, "it should always return a valid result");
  }

  verify_region_sets_optional();

  return result;
}
821 | |
// Allocate a new TLAB of word_size words from the mutator alloc region.
// TLABs are never humongous; the GC count out-param of
// attempt_allocation() is not needed here, hence the dummy.
HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "we do not allow humongous TLABs");

  unsigned int dummy_gc_count_before;
  return attempt_allocation(word_size, &dummy_gc_count_before);
}
829 | |
// General object allocation entry point. Retries allocation, scheduling
// a collection pause via a VM operation on each failure, until either
// the allocation succeeds or a successfully-executed pause still cannot
// satisfy it (in which case NULL is returned).
// Note: this implementation never writes *gc_overhead_limit_was_exceeded.
HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool* gc_overhead_limit_was_exceeded) {
  assert_heap_not_locked_and_not_at_safepoint();

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    unsigned int gc_count_before;

    HeapWord* result = NULL;
    if (!isHumongous(word_size)) {
      result = attempt_allocation(word_size, &gc_count_before);
    } else {
      result = attempt_allocation_humongous(word_size, &gc_count_before);
    }
    if (result != NULL) {
      return result;
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(gc_count_before, word_size);
    // ...and get the VM thread to execute it.
    VMThread::execute(&op);

    if (op.prologue_succeeded() && op.pause_succeeded()) {
      // If the operation was successful we'll return the result even
      // if it is NULL. If the allocation attempt failed immediately
      // after a Full GC, it's unlikely we'll be able to allocate now.
      HeapWord* result = op.result();
      if (result != NULL && !isHumongous(word_size)) {
        // Allocations that take place on VM operations do not do any
        // card dirtying and we have to do it here. We only have to do
        // this for non-humongous allocations, though.
        dirty_young_block(result, word_size);
      }
      return result;
    } else {
      assert(op.result() == NULL,
             "the result should be NULL if the VM op did not succeed");
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}
881 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
882 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
883 unsigned int *gc_count_before_ret) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
884 // Make sure you read the note in attempt_allocation_humongous(). |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
885 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
886 assert_heap_not_locked_and_not_at_safepoint(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
887 assert(!isHumongous(word_size), "attempt_allocation_slow() should not " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
888 "be called for humongous allocation requests"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
889 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
890 // We should only get here after the first-level allocation attempt |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
891 // (attempt_allocation()) failed to allocate. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
892 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
893 // We will loop until a) we manage to successfully perform the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
894 // allocation or b) we successfully schedule a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
895 // fails to perform the allocation. b) is the only case when we'll |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
896 // return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
897 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
898 for (int try_count = 1; /* we'll return */; try_count += 1) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
899 bool should_try_gc; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
900 unsigned int gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
901 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
902 { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
903 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
904 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
905 result = _mutator_alloc_region.attempt_allocation_locked(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
906 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
907 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
908 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
909 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
910 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
911 // If we reach here, attempt_allocation_locked() above failed to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
912 // allocate a new region. So the mutator alloc region should be NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
913 assert(_mutator_alloc_region.get() == NULL, "only way to get here"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
914 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
915 if (GC_locker::is_active_and_needs_gc()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
916 if (g1_policy()->can_expand_young_list()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
917 result = _mutator_alloc_region.attempt_allocation_force(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
918 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
919 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
920 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
921 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
922 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
923 should_try_gc = false; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
924 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
925 // Read the GC count while still holding the Heap_lock. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
926 gc_count_before = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
927 should_try_gc = true; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
928 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
929 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
930 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
931 if (should_try_gc) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
932 bool succeeded; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
933 result = do_collection_pause(word_size, gc_count_before, &succeeded); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
934 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
935 assert(succeeded, "only way to get back a non-NULL result"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
936 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
937 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
938 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
939 if (succeeded) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
940 // If we get here we successfully scheduled a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
941 // failed to allocate. No point in trying to allocate |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
942 // further. We'll just return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
943 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
944 *gc_count_before_ret = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
945 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
946 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
947 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
948 GC_locker::stall_until_clear(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
949 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
950 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
951 // We can reach here if we were unsuccessul in scheduling a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
952 // collection (because another thread beat us to it) or if we were |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
953 // stalled due to the GC locker. In either can we should retry the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
954 // allocation attempt in case another thread successfully |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
955 // performed a collection and reclaimed enough space. We do the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
956 // first attempt (without holding the Heap_lock) here and the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
957 // follow-on attempt will be at the start of the next loop |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
958 // iteration (after taking the Heap_lock). |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
959 result = _mutator_alloc_region.attempt_allocation(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
960 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
961 if (result != NULL ){ |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
962 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
963 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
964 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
965 // Give a warning if we seem to be looping forever. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
966 if ((QueuedAllocationWarningCount > 0) && |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
967 (try_count % QueuedAllocationWarningCount == 0)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
968 warning("G1CollectedHeap::attempt_allocation_slow() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
969 "retries %d times", try_count); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
970 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
971 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
972 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
973 ShouldNotReachHere(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
974 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
975 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
976 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
977 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
978 unsigned int * gc_count_before_ret) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
979 // The structure of this method has a lot of similarities to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
980 // attempt_allocation_slow(). The reason these two were not merged |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
981 // into a single one is that such a method would require several "if |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
982 // allocation is not humongous do this, otherwise do that" |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
983 // conditional paths which would obscure its flow. In fact, an early |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
984 // version of this code did use a unified method which was harder to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
985 // follow and, as a result, it had subtle bugs that were hard to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
986 // track down. So keeping these two methods separate allows each to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
987 // be more readable. It will be good to keep these two in sync as |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
988 // much as possible. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
989 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
990 assert_heap_not_locked_and_not_at_safepoint(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
991 assert(isHumongous(word_size), "attempt_allocation_humongous() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
992 "should only be called for humongous allocations"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
993 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
994 // We will loop until a) we manage to successfully perform the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
995 // allocation or b) we successfully schedule a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
996 // fails to perform the allocation. b) is the only case when we'll |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
997 // return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
998 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
999 for (int try_count = 1; /* we'll return */; try_count += 1) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1000 bool should_try_gc; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1001 unsigned int gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1002 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1003 { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1004 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1005 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1006 // Given that humongous objects are not allocated in young |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1007 // regions, we'll first try to do the allocation without doing a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1008 // collection hoping that there's enough space in the heap. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1009 result = humongous_obj_allocate(word_size); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1010 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1011 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1012 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1013 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1014 if (GC_locker::is_active_and_needs_gc()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1015 should_try_gc = false; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1016 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1017 // Read the GC count while still holding the Heap_lock. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1018 gc_count_before = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1019 should_try_gc = true; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1020 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1021 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1022 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1023 if (should_try_gc) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1024 // If we failed to allocate the humongous object, we should try to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1025 // do a collection pause (if we're allowed) in case it reclaims |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1026 // enough space for the allocation to succeed after the pause. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1027 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1028 bool succeeded; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1029 result = do_collection_pause(word_size, gc_count_before, &succeeded); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1030 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1031 assert(succeeded, "only way to get back a non-NULL result"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1032 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1033 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1034 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1035 if (succeeded) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1036 // If we get here we successfully scheduled a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1037 // failed to allocate. No point in trying to allocate |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1038 // further. We'll just return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1039 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1040 *gc_count_before_ret = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1041 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1042 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1043 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1044 GC_locker::stall_until_clear(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1045 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1046 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1047 // We can reach here if we were unsuccessul in scheduling a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1048 // collection (because another thread beat us to it) or if we were |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1049 // stalled due to the GC locker. In either can we should retry the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1050 // allocation attempt in case another thread successfully |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1051 // performed a collection and reclaimed enough space. Give a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1052 // warning if we seem to be looping forever. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1053 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1054 if ((QueuedAllocationWarningCount > 0) && |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1055 (try_count % QueuedAllocationWarningCount == 0)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1056 warning("G1CollectedHeap::attempt_allocation_humongous() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1057 "retries %d times", try_count); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1058 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1059 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1060 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1061 ShouldNotReachHere(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1062 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1063 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1064 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1065 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1066 bool expect_null_mutator_alloc_region) { |
2152 | 1067 assert_at_safepoint(true /* should_be_vm_thread */); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1068 assert(_mutator_alloc_region.get() == NULL || |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1069 !expect_null_mutator_alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1070 "the current alloc region was unexpectedly found to be non-NULL"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1071 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1072 if (!isHumongous(word_size)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1073 return _mutator_alloc_region.attempt_allocation_locked(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1074 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1075 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1076 return humongous_obj_allocate(word_size); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1077 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1078 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1079 ShouldNotReachHere(); |
342 | 1080 } |
1081 | |
636 | 1082 void G1CollectedHeap::abandon_gc_alloc_regions() { |
1083 // first, make sure that the GC alloc region list is empty (it should!) | |
1084 assert(_gc_alloc_region_list == NULL, "invariant"); | |
1085 release_gc_alloc_regions(true /* totally */); | |
1086 } | |
1087 | |
342 | 1088 class PostMCRemSetClearClosure: public HeapRegionClosure { |
1089 ModRefBarrierSet* _mr_bs; | |
1090 public: | |
1091 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1092 bool doHeapRegion(HeapRegion* r) { | |
1093 r->reset_gc_time_stamp(); | |
1094 if (r->continuesHumongous()) | |
1095 return false; | |
1096 HeapRegionRemSet* hrrs = r->rem_set(); | |
1097 if (hrrs != NULL) hrrs->clear(); | |
1098 // You might think here that we could clear just the cards | |
1099 // corresponding to the used region. But no: if we leave a dirty card | |
1100 // in a region we might allocate into, then it would prevent that card | |
1101 // from being enqueued, and cause it to be missed. | |
1102 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
1103 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
1104 return false; | |
1105 } | |
1106 }; | |
1107 | |
1108 | |
1109 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
1110 ModRefBarrierSet* _mr_bs; | |
1111 public: | |
1112 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1113 bool doHeapRegion(HeapRegion* r) { | |
1114 if (r->continuesHumongous()) return false; | |
1115 if (r->used_region().word_size() != 0) { | |
1116 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
1117 } | |
1118 return false; | |
1119 } | |
1120 }; | |
1121 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1122 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1123 G1CollectedHeap* _g1h; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1124 UpdateRSOopClosure _cl; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1125 int _worker_i; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1126 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1127 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
1861 | 1128 _cl(g1->g1_rem_set(), worker_i), |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1129 _worker_i(worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1130 _g1h(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1131 { } |
1960
878b57474103
6978187: G1: assert(ParallelGCThreads> 1 || n_yielded() == _hrrs->occupied()) strikes again
johnc
parents:
1883
diff
changeset
|
1132 |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1133 bool doHeapRegion(HeapRegion* r) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1134 if (!r->continuesHumongous()) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1135 _cl.set_from(r); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1136 r->oop_iterate(&_cl); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1137 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1138 return false; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1139 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1140 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1141 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1142 class ParRebuildRSTask: public AbstractGangTask { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1143 G1CollectedHeap* _g1; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1144 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1145 ParRebuildRSTask(G1CollectedHeap* g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1146 : AbstractGangTask("ParRebuildRSTask"), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1147 _g1(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1148 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1149 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1150 void work(int i) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1151 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1152 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1153 HeapRegion::RebuildRSClaimValue); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1154 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1155 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1156 |
1973 | 1157 bool G1CollectedHeap::do_collection(bool explicit_gc, |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1158 bool clear_all_soft_refs, |
342 | 1159 size_t word_size) { |
2152 | 1160 assert_at_safepoint(true /* should_be_vm_thread */); |
1161 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1162 if (GC_locker::check_active_before_gc()) { |
1973 | 1163 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1164 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1165 |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2039
diff
changeset
|
1166 SvcGCMarker sgcm(SvcGCMarker::FULL); |
342 | 1167 ResourceMark rm; |
1168 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1169 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1170 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1171 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1172 |
2152 | 1173 verify_region_sets_optional(); |
342 | 1174 |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1175 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1176 collector_policy()->should_clear_all_soft_refs(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1177 |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1178 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1179 |
342 | 1180 { |
1181 IsGCActiveMark x; | |
1182 | |
1183 // Timing | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1184 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1185 assert(!system_gc || explicit_gc, "invariant"); |
342 | 1186 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
1187 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1188 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1189 PrintGC, true, gclog_or_tty); |
342 | 1190 |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
1191 TraceCollectorStats tcs(g1mm()->full_collection_counters()); |
3356
78542e2b5e35
7036199: Adding a notification to the implementation of GarbageCollectorMXBeans
fparain
parents:
3323
diff
changeset
|
1192 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1193 |
342 | 1194 double start = os::elapsedTime(); |
1195 g1_policy()->record_full_collection_start(); | |
1196 | |
2152 | 1197 wait_while_free_regions_coming(); |
2361 | 1198 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 1199 |
342 | 1200 gc_prologue(true); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1201 increment_total_collections(true /* full gc */); |
342 | 1202 |
1203 size_t g1h_prev_used = used(); | |
1204 assert(used() == recalculate_used(), "Should be equal"); | |
1205 | |
1206 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
1207 HandleMark hm; // Discard invalid handles created during verification | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1208 gclog_or_tty->print(" VerifyBeforeGC:"); |
342 | 1209 prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1210 Universe::verify(/* allow dirty */ true, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1211 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1212 /* option */ VerifyOption_G1UsePrevMarking); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1213 |
342 | 1214 } |
1215 | |
1216 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
1217 | |
1218 // We want to discover references, but not process them yet. | |
1219 // This mode is disabled in | |
1220 // instanceRefKlass::process_discovered_references if the | |
1221 // generation does some collection work, or | |
1222 // instanceRefKlass::enqueue_discovered_references if the | |
1223 // generation returns without doing any work. | |
1224 ref_processor()->disable_discovery(); | |
1225 ref_processor()->abandon_partial_discovery(); | |
1226 ref_processor()->verify_no_references_recorded(); | |
1227 | |
1228 // Abandon current iterations of concurrent marking and concurrent | |
1229 // refinement, if any are in progress. | |
1230 concurrent_mark()->abort(); | |
1231 | |
1232 // Make sure we'll choose a new allocation region afterwards. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1233 release_mutator_alloc_region(); |
636 | 1234 abandon_gc_alloc_regions(); |
1861 | 1235 g1_rem_set()->cleanupHRRS(); |
342 | 1236 tear_down_region_lists(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1237 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1238 // We may have added regions to the current incremental collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1239 // set between the last GC or pause and now. We need to clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1240 // incremental collection set and then start rebuilding it afresh |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1241 // after this full GC. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1242 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1243 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1244 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1245 |
342 | 1246 if (g1_policy()->in_young_gc_mode()) { |
1247 empty_young_list(); | |
1248 g1_policy()->set_full_young_gcs(true); | |
1249 } | |
1250 | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1251 // See the comment in G1CollectedHeap::ref_processing_init() about |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1252 // how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1253 |
342 | 1254 // Temporarily make reference _discovery_ single threaded (non-MT). |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
1255 ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false); |
342 | 1256 |
1257 // Temporarily make refs discovery atomic | |
1258 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
1259 | |
1260 // Temporarily clear _is_alive_non_header | |
1261 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
1262 | |
1263 ref_processor()->enable_discovery(); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1264 ref_processor()->setup_policy(do_clear_all_soft_refs); |
342 | 1265 // Do collection work |
1266 { | |
1267 HandleMark hm; // Discard invalid handles created during gc | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1268 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); |
342 | 1269 } |
2152 | 1270 assert(free_regions() == 0, "we should not have added any free regions"); |
342 | 1271 rebuild_region_lists(); |
1272 | |
1273 _summary_bytes_used = recalculate_used(); | |
1274 | |
1275 ref_processor()->enqueue_discovered_references(); | |
1276 | |
1277 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
1278 | |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1279 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1280 |
342 | 1281 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
1282 HandleMark hm; // Discard invalid handles created during verification | |
1283 gclog_or_tty->print(" VerifyAfterGC:"); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
1284 prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1285 Universe::verify(/* allow dirty */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1286 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1287 /* option */ VerifyOption_G1UsePrevMarking); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1288 |
342 | 1289 } |
1290 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
1291 | |
1292 reset_gc_time_stamp(); | |
1293 // Since everything potentially moved, we will clear all remembered | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1294 // sets, and clear all cards. Later we will rebuild remebered |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1295 // sets. We will also reset the GC time stamps of the regions. |
342 | 1296 PostMCRemSetClearClosure rs_clear(mr_bs()); |
1297 heap_region_iterate(&rs_clear); | |
1298 | |
1299 // Resize the heap if necessary. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1300 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); |
342 | 1301 |
1302 if (_cg1r->use_cache()) { | |
1303 _cg1r->clear_and_record_card_counts(); | |
1304 _cg1r->clear_hot_cache(); | |
1305 } | |
1306 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1307 // Rebuild remembered sets of all regions. |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1308 |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1309 if (G1CollectedHeap::use_parallel_gc_threads()) { |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1310 ParRebuildRSTask rebuild_rs_task(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1311 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1312 HeapRegion::InitialClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1313 set_par_threads(workers()->total_workers()); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1314 workers()->run_task(&rebuild_rs_task); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1315 set_par_threads(0); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1316 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1317 HeapRegion::RebuildRSClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1318 reset_heap_region_claim_values(); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1319 } else { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1320 RebuildRSOutOfRegionClosure rebuild_rs(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1321 heap_region_iterate(&rebuild_rs); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1322 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1323 |
342 | 1324 if (PrintGC) { |
1325 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
1326 } | |
1327 | |
1328 if (true) { // FIXME | |
1329 // Ask the permanent generation to adjust size for full collections | |
1330 perm()->compute_new_size(); | |
1331 } | |
1332 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1333 // Start a new incremental collection set for the next pause |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1334 assert(g1_policy()->collection_set() == NULL, "must be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1335 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1336 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1337 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1338 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1339 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1340 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1341 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1342 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1343 |
342 | 1344 double end = os::elapsedTime(); |
1345 g1_policy()->record_full_collection_end(); | |
1346 | |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1347 #ifdef TRACESPINNING |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1348 ParallelTaskTerminator::print_termination_counts(); |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1349 #endif |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1350 |
342 | 1351 gc_epilogue(true); |
1352 | |
794 | 1353 // Discard all rset updates |
1354 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1355 assert(!G1DeferredRSUpdate |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1356 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
342 | 1357 } |
1358 | |
1359 if (g1_policy()->in_young_gc_mode()) { | |
1360 _young_list->reset_sampled_info(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1361 // At this point there should be no regions in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1362 // entire heap tagged as young. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1363 assert( check_young_list_empty(true /* check_heap */), |
342 | 1364 "young list should be empty at this point"); |
1365 } | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1366 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1367 // Update the number of full collections that have been completed. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
1368 increment_full_collections_completed(false /* concurrent */); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1369 |
3766 | 1370 _hrs.verify_optional(); |
2152 | 1371 verify_region_sets_optional(); |
1372 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1373 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1374 Universe::print_heap_after_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1375 } |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
1376 g1mm()->update_counters(); |
1973 | 1377 |
1378 return true; | |
342 | 1379 } |
1380 | |
1381 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1973 | 1382 // do_collection() will return whether it succeeded in performing |
1383 // the GC. Currently, there is no facility on the | |
1384 // do_full_collection() API to notify the caller than the collection | |
1385 // did not succeed (e.g., because it was locked out by the GC | |
1386 // locker). So, right now, we'll ignore the return value. | |
1387 bool dummy = do_collection(true, /* explicit_gc */ | |
1388 clear_all_soft_refs, | |
1389 0 /* word_size */); | |
342 | 1390 } |
1391 | |
1392 // This code is mostly copied from TenuredGeneration. | |
1393 void | |
1394 G1CollectedHeap:: | |
1395 resize_if_necessary_after_full_collection(size_t word_size) { | |
1396 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1397 | |
1398 // Include the current allocation, if any, and bytes that will be | |
1399 // pre-allocated to support collections, as "used". | |
1400 const size_t used_after_gc = used(); | |
1401 const size_t capacity_after_gc = capacity(); | |
1402 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1403 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1404 // This is enforced in arguments.cpp. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1405 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1406 "otherwise the code below doesn't make sense"); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1407 |
342 | 1408 // We don't have floating point command-line arguments |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1409 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; |
342 | 1410 const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1411 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; |
342 | 1412 const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
1413 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1414 const size_t min_heap_size = collector_policy()->min_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1415 const size_t max_heap_size = collector_policy()->max_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1416 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1417 // We have to be careful here as these two calculations can overflow |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1418 // 32-bit size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1419 double used_after_gc_d = (double) used_after_gc; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1420 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1421 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1422 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1423 // Let's make sure that they are both under the max heap size, which |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1424 // by default will make them fit into a size_t. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1425 double desired_capacity_upper_bound = (double) max_heap_size; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1426 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1427 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1428 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1429 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1430 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1431 // We can now safely turn them into size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1432 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1433 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1434 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1435 // This assert only makes sense here, before we adjust them |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1436 // with respect to the min and max heap size. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1437 assert(minimum_desired_capacity <= maximum_desired_capacity, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1438 err_msg("minimum_desired_capacity = "SIZE_FORMAT", " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1439 "maximum_desired_capacity = "SIZE_FORMAT, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1440 minimum_desired_capacity, maximum_desired_capacity)); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1441 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1442 // Should not be greater than the heap max size. No need to adjust |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1443 // it with respect to the heap min size as it's a lower bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1444 // we'll try to make the capacity larger than it, not smaller). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1445 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1446 // Should not be less than the heap min size. No need to adjust it |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1447 // with respect to the heap max size as it's an upper bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1448 // we'll try to make the capacity smaller than it, not greater). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1449 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); |
342 | 1450 |
1451 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1452 const double free_percentage = |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1453 (double) free_after_gc / (double) capacity_after_gc; |
342 | 1454 gclog_or_tty->print_cr("Computing new size after full GC "); |
1455 gclog_or_tty->print_cr(" " | |
1456 " minimum_free_percentage: %6.2f", | |
1457 minimum_free_percentage); | |
1458 gclog_or_tty->print_cr(" " | |
1459 " maximum_free_percentage: %6.2f", | |
1460 maximum_free_percentage); | |
1461 gclog_or_tty->print_cr(" " | |
1462 " capacity: %6.1fK" | |
1463 " minimum_desired_capacity: %6.1fK" | |
1464 " maximum_desired_capacity: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1465 (double) capacity_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1466 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1467 (double) maximum_desired_capacity / (double) K); |
342 | 1468 gclog_or_tty->print_cr(" " |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1469 " free_after_gc: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1470 " used_after_gc: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1471 (double) free_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1472 (double) used_after_gc / (double) K); |
342 | 1473 gclog_or_tty->print_cr(" " |
1474 " free_percentage: %6.2f", | |
1475 free_percentage); | |
1476 } | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1477 if (capacity_after_gc < minimum_desired_capacity) { |
342 | 1478 // Don't expand unless it's significant |
1479 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1480 if (expand(expand_bytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1481 if (PrintGC && Verbose) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1482 gclog_or_tty->print_cr(" " |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1483 " expanding:" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1484 " max_heap_size: %6.1fK" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1485 " minimum_desired_capacity: %6.1fK" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1486 " expand_bytes: %6.1fK", |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1487 (double) max_heap_size / (double) K, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1488 (double) minimum_desired_capacity / (double) K, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1489 (double) expand_bytes / (double) K); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1490 } |
342 | 1491 } |
1492 | |
1493 // No expansion, now see if we want to shrink | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1494 } else if (capacity_after_gc > maximum_desired_capacity) { |
342 | 1495 // Capacity too large, compute shrinking size |
1496 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1497 shrink(shrink_bytes); | |
1498 if (PrintGC && Verbose) { | |
1499 gclog_or_tty->print_cr(" " | |
1500 " shrinking:" | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1501 " min_heap_size: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1502 " maximum_desired_capacity: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1503 " shrink_bytes: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1504 (double) min_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1505 (double) maximum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1506 (double) shrink_bytes / (double) K); |
342 | 1507 } |
1508 } | |
1509 } | |
1510 | |
1511 | |
1512 HeapWord* | |
1973 | 1513 G1CollectedHeap::satisfy_failed_allocation(size_t word_size, |
1514 bool* succeeded) { | |
2152 | 1515 assert_at_safepoint(true /* should_be_vm_thread */); |
1973 | 1516 |
1517 *succeeded = true; | |
1518 // Let's attempt the allocation first. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1519 HeapWord* result = |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1520 attempt_allocation_at_safepoint(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1521 false /* expect_null_mutator_alloc_region */); |
1973 | 1522 if (result != NULL) { |
1523 assert(*succeeded, "sanity"); | |
1524 return result; | |
1525 } | |
342 | 1526 |
1527 // In a G1 heap, we're supposed to keep allocation from failing by | |
1528 // incremental pauses. Therefore, at least for now, we'll favor | |
1529 // expansion over collection. (This might change in the future if we can | |
1530 // do something smarter than full collection to satisfy a failed alloc.) | |
1531 result = expand_and_allocate(word_size); | |
1532 if (result != NULL) { | |
1973 | 1533 assert(*succeeded, "sanity"); |
342 | 1534 return result; |
1535 } | |
1536 | |
1973 | 1537 // Expansion didn't work, we'll try to do a Full GC. |
1538 bool gc_succeeded = do_collection(false, /* explicit_gc */ | |
1539 false, /* clear_all_soft_refs */ | |
1540 word_size); | |
1541 if (!gc_succeeded) { | |
1542 *succeeded = false; | |
1543 return NULL; | |
1544 } | |
1545 | |
1546 // Retry the allocation | |
1547 result = attempt_allocation_at_safepoint(word_size, | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1548 true /* expect_null_mutator_alloc_region */); |
342 | 1549 if (result != NULL) { |
1973 | 1550 assert(*succeeded, "sanity"); |
342 | 1551 return result; |
1552 } | |
1553 | |
1973 | 1554 // Then, try a Full GC that will collect all soft references. |
1555 gc_succeeded = do_collection(false, /* explicit_gc */ | |
1556 true, /* clear_all_soft_refs */ | |
1557 word_size); | |
1558 if (!gc_succeeded) { | |
1559 *succeeded = false; | |
1560 return NULL; | |
1561 } | |
1562 | |
1563 // Retry the allocation once more | |
1564 result = attempt_allocation_at_safepoint(word_size, | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1565 true /* expect_null_mutator_alloc_region */); |
342 | 1566 if (result != NULL) { |
1973 | 1567 assert(*succeeded, "sanity"); |
342 | 1568 return result; |
1569 } | |
1570 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1571 assert(!collector_policy()->should_clear_all_soft_refs(), |
1973 | 1572 "Flag should have been handled and cleared prior to this point"); |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1573 |
342 | 1574 // What else? We might try synchronous finalization later. If the total |
1575 // space available is large enough for the allocation, then a more | |
1576 // complete compaction phase than we've tried so far might be | |
1577 // appropriate. | |
1973 | 1578 assert(*succeeded, "sanity"); |
342 | 1579 return NULL; |
1580 } | |
1581 | |
1582 // Attempting to expand the heap sufficiently | |
1583 // to support an allocation of the given "word_size". If | |
1584 // successful, perform the allocation and return the address of the | |
1585 // allocated block, or else "NULL". | |
1586 | |
1587 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
2152 | 1588 assert_at_safepoint(true /* should_be_vm_thread */); |
1589 | |
1590 verify_region_sets_optional(); | |
1973 | 1591 |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1592 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1593 if (expand(expand_bytes)) { |
3766 | 1594 _hrs.verify_optional(); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1595 verify_region_sets_optional(); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1596 return attempt_allocation_at_safepoint(word_size, |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1597 false /* expect_null_mutator_alloc_region */); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1598 } |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1599 return NULL; |
342 | 1600 } |
1601 | |
3766 | 1602 void G1CollectedHeap::update_committed_space(HeapWord* old_end, |
1603 HeapWord* new_end) { | |
1604 assert(old_end != new_end, "don't call this otherwise"); | |
1605 assert((HeapWord*) _g1_storage.high() == new_end, "invariant"); | |
1606 | |
1607 // Update the committed mem region. | |
1608 _g1_committed.set_end(new_end); | |
1609 // Tell the card table about the update. | |
1610 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1611 // Tell the BOT about the update. | |
1612 _bot_shared->resize(_g1_committed.word_size()); | |
1613 } | |
1614 | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1615 bool G1CollectedHeap::expand(size_t expand_bytes) { |
342 | 1616 size_t old_mem_size = _g1_storage.committed_size(); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1617 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); |
342 | 1618 aligned_expand_bytes = align_size_up(aligned_expand_bytes, |
1619 HeapRegion::GrainBytes); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1620 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1621 if (Verbose && PrintGC) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1622 gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK", |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1623 old_mem_size/K, aligned_expand_bytes/K); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1624 } |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1625 |
3766 | 1626 // First commit the memory. |
1627 HeapWord* old_end = (HeapWord*) _g1_storage.high(); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1628 bool successful = _g1_storage.expand_by(aligned_expand_bytes); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1629 if (successful) { |
3766 | 1630 // Then propagate this update to the necessary data structures. |
1631 HeapWord* new_end = (HeapWord*) _g1_storage.high(); | |
1632 update_committed_space(old_end, new_end); | |
1633 | |
1634 FreeRegionList expansion_list("Local Expansion List"); | |
1635 MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list); | |
1636 assert(mr.start() == old_end, "post-condition"); | |
1637 // mr might be a smaller region than what was requested if | |
1638 // expand_by() was unable to allocate the HeapRegion instances | |
1639 assert(mr.end() <= new_end, "post-condition"); | |
1640 | |
1641 size_t actual_expand_bytes = mr.byte_size(); | |
1642 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition"); | |
1643 assert(actual_expand_bytes == expansion_list.total_capacity_bytes(), | |
1644 "post-condition"); | |
1645 if (actual_expand_bytes < aligned_expand_bytes) { | |
1646 // We could not expand _hrs to the desired size. In this case we | |
1647 // need to shrink the committed space accordingly. | |
1648 assert(mr.end() < new_end, "invariant"); | |
1649 | |
1650 size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes; | |
1651 // First uncommit the memory. | |
1652 _g1_storage.shrink_by(diff_bytes); | |
1653 // Then propagate this update to the necessary data structures. | |
1654 update_committed_space(new_end, mr.end()); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1655 } |
3766 | 1656 _free_list.add_as_tail(&expansion_list); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1657 } else { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1658 // The expansion of the virtual storage space was unsuccessful. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1659 // Let's see if it was because we ran out of swap. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1660 if (G1ExitOnExpansionFailure && |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1661 _g1_storage.uncommitted_size() >= aligned_expand_bytes) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1662 // We had head room... |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1663 vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion"); |
342 | 1664 } |
1665 } | |
2152 | 1666 |
342 | 1667 if (Verbose && PrintGC) { |
1668 size_t new_mem_size = _g1_storage.committed_size(); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1669 gclog_or_tty->print_cr("...%s, expanded to %ldK", |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1670 (successful ? "Successful" : "Failed"), |
342 | 1671 new_mem_size/K); |
1672 } | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1673 return successful; |
342 | 1674 } |
1675 | |
3766 | 1676 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { |
342 | 1677 size_t old_mem_size = _g1_storage.committed_size(); |
1678 size_t aligned_shrink_bytes = | |
1679 ReservedSpace::page_align_size_down(shrink_bytes); | |
1680 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1681 HeapRegion::GrainBytes); | |
1682 size_t num_regions_deleted = 0; | |
3766 | 1683 MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted); |
1684 HeapWord* old_end = (HeapWord*) _g1_storage.high(); | |
1685 assert(mr.end() == old_end, "post-condition"); | |
1686 if (mr.byte_size() > 0) { | |
342 | 1687 _g1_storage.shrink_by(mr.byte_size()); |
3766 | 1688 HeapWord* new_end = (HeapWord*) _g1_storage.high(); |
1689 assert(mr.start() == new_end, "post-condition"); | |
1690 | |
1691 _expansion_regions += num_regions_deleted; | |
1692 update_committed_space(old_end, new_end); | |
1693 HeapRegionRemSet::shrink_heap(n_regions()); | |
1694 | |
1695 if (Verbose && PrintGC) { | |
1696 size_t new_mem_size = _g1_storage.committed_size(); | |
1697 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1698 old_mem_size/K, aligned_shrink_bytes/K, | |
1699 new_mem_size/K); | |
1700 } | |
342 | 1701 } |
1702 } | |
1703 | |
1704 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
2152 | 1705 verify_region_sets_optional(); |
1706 | |
636 | 1707 release_gc_alloc_regions(true /* totally */); |
2152 | 1708 // Instead of tearing down / rebuilding the free lists here, we |
1709 // could instead use the remove_all_pending() method on free_list to | |
1710 // remove only the ones that we need to remove. | |
342 | 1711 tear_down_region_lists(); // We will rebuild them in a moment. |
1712 shrink_helper(shrink_bytes); | |
1713 rebuild_region_lists(); | |
2152 | 1714 |
3766 | 1715 _hrs.verify_optional(); |
2152 | 1716 verify_region_sets_optional(); |
342 | 1717 } |
1718 | |
1719 // Public methods. | |
1720 | |
1721 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1722 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1723 #endif // _MSC_VER | |
1724 | |
1725 | |
1726 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1727 SharedHeap(policy_), | |
1728 _g1_policy(policy_), | |
1111 | 1729 _dirty_card_queue_set(false), |
1705 | 1730 _into_cset_dirty_card_queue_set(false), |
2037
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
2030
diff
changeset
|
1731 _is_alive_closure(this), |
342 | 1732 _ref_processor(NULL), |
1733 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1734 _bot_shared(NULL), | |
1735 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1736 _evac_failure_scan_stack(NULL) , | |
1737 _mark_in_progress(false), | |
2152 | 1738 _cg1r(NULL), _summary_bytes_used(0), |
342 | 1739 _refine_cte_cl(NULL), |
1740 _full_collection(false), | |
2152 | 1741 _free_list("Master Free List"), |
1742 _secondary_free_list("Secondary Free List"), | |
1743 _humongous_set("Master Humongous Set"), | |
1744 _free_regions_coming(false), | |
342 | 1745 _young_list(new YoungList(this)), |
1746 _gc_time_stamp(0), | |
526 | 1747 _surviving_young_words(NULL), |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1748 _full_collections_completed(0), |
526 | 1749 _in_cset_fast_test(NULL), |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1750 _in_cset_fast_test_base(NULL), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1751 _dirty_cards_region_list(NULL) { |
342 | 1752 _g1h = this; // To catch bugs. |
1753 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1754 vm_exit_during_initialization("Failed necessary allocation."); | |
1755 } | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1756 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1757 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1758 |
342 | 1759 int n_queues = MAX2((int)ParallelGCThreads, 1); |
1760 _task_queues = new RefToScanQueueSet(n_queues); | |
1761 | |
1762 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1763 assert(n_rem_sets > 0, "Invariant."); | |
1764 | |
1765 HeapRegionRemSetIterator** iter_arr = | |
1766 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1767 for (int i = 0; i < n_queues; i++) { | |
1768 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1769 } | |
1770 _rem_set_iterator = iter_arr; | |
1771 | |
1772 for (int i = 0; i < n_queues; i++) { | |
1773 RefToScanQueue* q = new RefToScanQueue(); | |
1774 q->initialize(); | |
1775 _task_queues->register_queue(i, q); | |
1776 } | |
1777 | |
1778 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
636 | 1779 _gc_alloc_regions[ap] = NULL; |
1780 _gc_alloc_region_counts[ap] = 0; | |
1781 _retained_gc_alloc_regions[ap] = NULL; | |
1782 // by default, we do not retain a GC alloc region for each ap; | |
1783 // we'll override this, when appropriate, below | |
1784 _retain_gc_alloc_region[ap] = false; | |
1785 } | |
1786 | |
1787 // We will try to remember the last half-full tenured region we | |
1788 // allocated to at the end of a collection so that we can re-use it | |
1789 // during the next collection. | |
1790 _retain_gc_alloc_region[GCAllocForTenured] = true; | |
1791 | |
342 | 1792 guarantee(_task_queues != NULL, "task_queues allocation failure."); |
1793 } | |
1794 | |
1795 jint G1CollectedHeap::initialize() { | |
1166 | 1796 CollectedHeap::pre_initialize(); |
342 | 1797 os::enable_vtime(); |
1798 | |
1799 // Necessary to satisfy locking discipline assertions. | |
1800 | |
1801 MutexLocker x(Heap_lock); | |
1802 | |
1803 // While there are no constraints in the GC code that HeapWordSize | |
1804 // be any particular value, there are multiple other areas in the | |
1805 // system which believe this to be true (e.g. oop->object_size in some | |
1806 // cases incorrectly returns the size in wordSize units rather than | |
1807 // HeapWordSize). | |
1808 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1809 | |
1810 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1811 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1812 | |
1813 // Ensure that the sizes are properly aligned. | |
1814 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1815 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1816 | |
1817 _cg1r = new ConcurrentG1Refine(); | |
1818 | |
1819 // Reserve the maximum. | |
1820 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1821 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1822 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1823 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1824 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1825 |
342 | 1826 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1827 HeapRegion::GrainBytes, | |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1828 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1829 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1830 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1831 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1832 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1833 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1834 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1835 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1836 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1837 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1838 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1839 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1840 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1841 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1842 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1843 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1844 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1845 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1846 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1847 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1848 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1849 } |
342 | 1850 |
1851 if (!heap_rs.is_reserved()) { | |
1852 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1853 return JNI_ENOMEM; | |
1854 } | |
1855 | |
1856 // It is important to do this in a way such that concurrent readers can't | |
1857 // temporarily think somethings in the heap. (I've actually seen this | |
1858 // happen in asserts: DLD.) | |
1859 _reserved.set_word_size(0); | |
1860 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1861 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1862 | |
1863 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1864 | |
1865 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1866 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1867 set_barrier_set(rem_set()->bs()); | |
1868 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1869 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1870 } else { | |
1871 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1872 return JNI_ENOMEM; | |
1873 } | |
1874 | |
1875 // Also create a G1 rem set. | |
1861 | 1876 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
1877 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
342 | 1878 } else { |
1861 | 1879 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
1880 return JNI_ENOMEM; | |
342 | 1881 } |
1882 | |
1883 // Carve out the G1 part of the heap. | |
1884 | |
1885 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1886 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1887 g1_rs.size()/HeapWordSize); | |
1888 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1889 | |
1890 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1891 | |
1892 _g1_storage.initialize(g1_rs, 0); | |
1893 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
3766 | 1894 _hrs.initialize((HeapWord*) _g1_reserved.start(), |
1895 (HeapWord*) _g1_reserved.end(), | |
1896 _expansion_regions); | |
342 | 1897 |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1898 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1899 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1900 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1901 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1902 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1903 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1904 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1905 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1906 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1907 |
2152 | 1908 HeapRegionSet::set_unrealistically_long_length(max_regions() + 1); |
1909 | |
342 | 1910 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1911 heap_word_size(init_byte_size)); | |
1912 | |
1913 _g1h = this; | |
1914 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1915 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1916 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1917 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1918 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1919 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1920 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1921 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1922 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1923 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1924 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1925 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1926 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1927 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1928 |
342 | 1929 // Create the ConcurrentMark data structure and thread. |
1930 // (Must do this late, so that "max_regions" is defined.) | |
1931 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1932 _cmThread = _cm->cmThread(); | |
1933 | |
1934 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1935 HeapRegionRemSet::init_heap(max_regions()); | |
1936 | |
677 | 1937 // Now expand into the initial heap size. |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1938 if (!expand(init_byte_size)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1939 vm_exit_during_initialization("Failed to allocate initial heap."); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1940 return JNI_ENOMEM; |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1941 } |
342 | 1942 |
1943 // Perform any initialization actions delegated to the policy. | |
1944 g1_policy()->init(); | |
1945 | |
1946 g1_policy()->note_start_of_mark_thread(); | |
1947 | |
1948 _refine_cte_cl = | |
1949 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1950 g1_rem_set(), | |
1951 concurrent_g1_refine()); | |
1952 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1953 | |
1954 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1955 SATB_Q_FL_lock, | |
1111 | 1956 G1SATBProcessCompletedThreshold, |
342 | 1957 Shared_SATB_Q_lock); |
794 | 1958 |
1959 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1960 DirtyCardQ_FL_lock, | |
1111 | 1961 concurrent_g1_refine()->yellow_zone(), |
1962 concurrent_g1_refine()->red_zone(), | |
794 | 1963 Shared_DirtyCardQ_lock); |
1964 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1965 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1966 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1967 DirtyCardQ_FL_lock, |
1111 | 1968 -1, // never trigger processing |
1969 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1970 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1971 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1972 } |
1705 | 1973 |
1974 // Initialize the card queue set used to hold cards containing | |
1975 // references into the collection set. | |
1976 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, | |
1977 DirtyCardQ_FL_lock, | |
1978 -1, // never trigger processing | |
1979 -1, // no limit on length | |
1980 Shared_DirtyCardQ_lock, | |
1981 &JavaThread::dirty_card_queue_set()); | |
1982 | |
342 | 1983 // In case we're keeping closure specialization stats, initialize those |
1984 // counts and that mechanism. | |
1985 SpecializationStats::clear(); | |
1986 | |
1987 _gc_alloc_region_list = NULL; | |
1988 | |
1989 // Do later initialization work for concurrent refinement. | |
1990 _cg1r->init(); | |
1991 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1992 // Here we allocate the dummy full region that is required by the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1993 // G1AllocRegion class. If we don't pass an address in the reserved |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1994 // space here, lots of asserts fire. |
3766 | 1995 |
1996 HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */, | |
1997 _g1_reserved.start()); | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1998 // We'll re-use the same region whether the alloc region will |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1999 // require BOT updates or not and, if it doesn't, then a non-young |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2000 // region will complain that it cannot support allocations without |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2001 // BOT updates. So we'll tag the dummy region as young to avoid that. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2002 dummy_region->set_young(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2003 // Make sure it's full. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2004 dummy_region->set_top(dummy_region->end()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2005 G1AllocRegion::setup(this, dummy_region); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2006 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2007 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2008 |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2009 // Do create of the monitoring and management support so that |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2010 // values in the heap have been properly initialized. |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2011 _g1mm = new G1MonitoringSupport(this, &_g1_storage); |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2012 |
342 | 2013 return JNI_OK; |
2014 } | |
2015 | |
2016 void G1CollectedHeap::ref_processing_init() { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2017 // Reference processing in G1 currently works as follows: |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2018 // |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2019 // * There is only one reference processor instance that |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2020 // 'spans' the entire heap. It is created by the code |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2021 // below. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2022 // * Reference discovery is not enabled during an incremental |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2023 // pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2024 // * Discoverered refs are not enqueued nor are they processed |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2025 // during an incremental pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2026 // * Reference discovery is enabled at initial marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2027 // * Reference discovery is disabled and the discovered |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2028 // references processed etc during remarking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2029 // * Reference discovery is MT (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2030 // * Reference discovery requires a barrier (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2031 // * Reference processing is currently not MT (see 6608385). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2032 // * A full GC enables (non-MT) reference discovery and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2033 // processes any discovered references. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2034 |
342 | 2035 SharedHeap::ref_processing_init(); |
2036 MemRegion mr = reserved_region(); | |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2037 _ref_processor = |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2038 new ReferenceProcessor(mr, // span |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2039 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2040 (int) ParallelGCThreads, // degree of mt processing |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2041 ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2042 (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2043 false, // Reference discovery is not atomic |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2044 &_is_alive_closure, // is alive closure for efficiency |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2045 true); // Setting next fields of discovered |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2046 // lists requires a barrier. |
342 | 2047 } |
2048 | |
2049 size_t G1CollectedHeap::capacity() const { | |
2050 return _g1_committed.byte_size(); | |
2051 } | |
2052 | |
1705 | 2053 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, |
2054 DirtyCardQueue* into_cset_dcq, | |
2055 bool concurrent, | |
342 | 2056 int worker_i) { |
889 | 2057 // Clean cards in the hot card cache |
1705 | 2058 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); |
889 | 2059 |
342 | 2060 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
2061 int n_completed_buffers = 0; | |
1705 | 2062 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { |
342 | 2063 n_completed_buffers++; |
2064 } | |
2065 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
2066 (double) n_completed_buffers); | |
2067 dcqs.clear_n_completed_buffers(); | |
2068 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
2069 } | |
2070 | |
2071 | |
2072 // Computes the sum of the storage used by the various regions. | |
2073 | |
2074 size_t G1CollectedHeap::used() const { | |
862
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2075 assert(Heap_lock->owner() != NULL, |
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2076 "Should be owned on this thread's behalf."); |
342 | 2077 size_t result = _summary_bytes_used; |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2078 // Read only once in case it is set to NULL concurrently |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2079 HeapRegion* hr = _mutator_alloc_region.get(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2080 if (hr != NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2081 result += hr->used(); |
342 | 2082 return result; |
2083 } | |
2084 | |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2085 size_t G1CollectedHeap::used_unlocked() const { |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2086 size_t result = _summary_bytes_used; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2087 return result; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2088 } |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2089 |
342 | 2090 class SumUsedClosure: public HeapRegionClosure { |
2091 size_t _used; | |
2092 public: | |
2093 SumUsedClosure() : _used(0) {} | |
2094 bool doHeapRegion(HeapRegion* r) { | |
2095 if (!r->continuesHumongous()) { | |
2096 _used += r->used(); | |
2097 } | |
2098 return false; | |
2099 } | |
2100 size_t result() { return _used; } | |
2101 }; | |
2102 | |
2103 size_t G1CollectedHeap::recalculate_used() const { | |
2104 SumUsedClosure blk; | |
3766 | 2105 heap_region_iterate(&blk); |
342 | 2106 return blk.result(); |
2107 } | |
2108 | |
2109 #ifndef PRODUCT | |
2110 class SumUsedRegionsClosure: public HeapRegionClosure { | |
2111 size_t _num; | |
2112 public: | |
677 | 2113 SumUsedRegionsClosure() : _num(0) {} |
342 | 2114 bool doHeapRegion(HeapRegion* r) { |
2115 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
2116 _num += 1; | |
2117 } | |
2118 return false; | |
2119 } | |
2120 size_t result() { return _num; } | |
2121 }; | |
2122 | |
2123 size_t G1CollectedHeap::recalculate_used_regions() const { | |
2124 SumUsedRegionsClosure blk; | |
3766 | 2125 heap_region_iterate(&blk); |
342 | 2126 return blk.result(); |
2127 } | |
2128 #endif // PRODUCT | |
2129 | |
2130 size_t G1CollectedHeap::unsafe_max_alloc() { | |
2152 | 2131 if (free_regions() > 0) return HeapRegion::GrainBytes; |
342 | 2132 // otherwise, is there space in the current allocation region? |
2133 | |
2134 // We need to store the current allocation region in a local variable | |
2135 // here. The problem is that this method doesn't take any locks and | |
2136 // there may be other threads which overwrite the current allocation | |
2137 // region field. attempt_allocation(), for example, sets it to NULL | |
2138 // and this can happen *after* the NULL check here but before the call | |
2139 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
2140 // to be a problem in the optimized build, since the two loads of the | |
2141 // current allocation region field are optimized away. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2142 HeapRegion* hr = _mutator_alloc_region.get(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2143 if (hr == NULL) { |
342 | 2144 return 0; |
2145 } | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2146 return hr->free(); |
342 | 2147 } |
2148 | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2149 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2150 return |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2151 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2152 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2153 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2154 |
3285
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2155 #ifndef PRODUCT |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2156 void G1CollectedHeap::allocate_dummy_regions() { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2157 // Let's fill up most of the region |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2158 size_t word_size = HeapRegion::GrainWords - 1024; |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2159 // And as a result the region we'll allocate will be humongous. |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2160 guarantee(isHumongous(word_size), "sanity"); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2161 |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2162 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2163 // Let's use the existing mechanism for the allocation |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2164 HeapWord* dummy_obj = humongous_obj_allocate(word_size); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2165 if (dummy_obj != NULL) { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2166 MemRegion mr(dummy_obj, word_size); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2167 CollectedHeap::fill_with_object(mr); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2168 } else { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2169 // If we can't allocate once, we probably cannot allocate |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2170 // again. Let's get out of the loop. |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2171 break; |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2172 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2173 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2174 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2175 #endif // !PRODUCT |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2176 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2177 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) { |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2178 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2179 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2180 // We assume that if concurrent == true, then the caller is a |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2181 // concurrent thread that was joined the Suspendible Thread |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2182 // Set. If there's ever a cheap way to check this, we should add an |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2183 // assert here. |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2184 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2185 // We have already incremented _total_full_collections at the start |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2186 // of the GC, so total_full_collections() represents how many full |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2187 // collections have been started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2188 unsigned int full_collections_started = total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2189 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2190 // Given that this method is called at the end of a Full GC or of a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2191 // concurrent cycle, and those can be nested (i.e., a Full GC can |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2192 // interrupt a concurrent cycle), the number of full collections |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2193 // completed should be either one (in the case where there was no |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2194 // nesting) or two (when a Full GC interrupted a concurrent cycle) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2195 // behind the number of full collections started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2196 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2197 // This is the case for the inner caller, i.e. a Full GC. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2198 assert(concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2199 (full_collections_started == _full_collections_completed + 1) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2200 (full_collections_started == _full_collections_completed + 2), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2201 err_msg("for inner caller (Full GC): full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2202 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2203 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2204 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2205 // This is the case for the outer caller, i.e. the concurrent cycle. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2206 assert(!concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2207 (full_collections_started == _full_collections_completed + 1), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2208 err_msg("for outer caller (concurrent cycle): " |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2209 "full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2210 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2211 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2212 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2213 _full_collections_completed += 1; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2214 |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2215 // We need to clear the "in_progress" flag in the CM thread before |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2216 // we wake up any waiters (especially when ExplicitInvokesConcurrent |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2217 // is set) so that if a waiter requests another System.gc() it doesn't |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2218 // incorrectly see that a marking cyle is still in progress. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2219 if (concurrent) { |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2220 _cmThread->clear_in_progress(); |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2221 } |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2222 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2223 // This notify_all() will ensure that a thread that called |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2224 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2225 // and it's waiting for a full GC to finish will be woken up. It is |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2226 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2227 FullGCCount_lock->notify_all(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2228 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2229 |
342 | 2230 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
2152 | 2231 assert_at_safepoint(true /* should_be_vm_thread */); |
342 | 2232 GCCauseSetter gcs(this, cause); |
2233 switch (cause) { | |
2234 case GCCause::_heap_inspection: | |
2235 case GCCause::_heap_dump: { | |
2236 HandleMark hm; | |
2237 do_full_collection(false); // don't clear all soft refs | |
2238 break; | |
2239 } | |
2240 default: // XXX FIX ME | |
2241 ShouldNotReachHere(); // Unexpected use of this function | |
2242 } | |
2243 } | |
2244 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2245 void G1CollectedHeap::collect(GCCause::Cause cause) { |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2246 // The caller doesn't have the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2247 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2248 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2249 unsigned int gc_count_before; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2250 unsigned int full_gc_count_before; |
342 | 2251 { |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2252 MutexLocker ml(Heap_lock); |
1973 | 2253 |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2254 // Read the GC count while holding the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2255 gc_count_before = SharedHeap::heap()->total_collections(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2256 full_gc_count_before = SharedHeap::heap()->total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2257 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2258 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2259 if (should_do_concurrent_full_gc(cause)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2260 // Schedule an initial-mark evacuation pause that will start a |
1973 | 2261 // concurrent cycle. We're setting word_size to 0 which means that |
2262 // we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2263 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2264 0, /* word_size */ |
2265 true, /* should_initiate_conc_mark */ | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2266 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2267 cause); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2268 VMThread::execute(&op); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2269 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2270 if (cause == GCCause::_gc_locker |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2271 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2272 |
1973 | 2273 // Schedule a standard evacuation pause. We're setting word_size |
2274 // to 0 which means that we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2275 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2276 0, /* word_size */ |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2277 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2278 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2279 cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2280 VMThread::execute(&op); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2281 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2282 // Schedule a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2283 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2284 VMThread::execute(&op); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2285 } |
342 | 2286 } |
2287 } | |
2288 | |
2289 bool G1CollectedHeap::is_in(const void* p) const { | |
3766 | 2290 HeapRegion* hr = _hrs.addr_to_region((HeapWord*) p); |
2291 if (hr != NULL) { | |
342 | 2292 return hr->is_in(p); |
2293 } else { | |
2294 return _perm_gen->as_gen()->is_in(p); | |
2295 } | |
2296 } | |
2297 | |
2298 // Iteration functions. | |
2299 | |
2300 // Iterates an OopClosure over all ref-containing fields of objects | |
2301 // within a HeapRegion. | |
2302 | |
2303 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
2304 MemRegion _mr; | |
2305 OopClosure* _cl; | |
2306 public: | |
2307 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
2308 : _mr(mr), _cl(cl) {} | |
2309 bool doHeapRegion(HeapRegion* r) { | |
2310 if (! r->continuesHumongous()) { | |
2311 r->oop_iterate(_cl); | |
2312 } | |
2313 return false; | |
2314 } | |
2315 }; | |
2316 | |
678 | 2317 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 2318 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
3766 | 2319 heap_region_iterate(&blk); |
678 | 2320 if (do_perm) { |
2321 perm_gen()->oop_iterate(cl); | |
2322 } | |
342 | 2323 } |
2324 | |
678 | 2325 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 2326 IterateOopClosureRegionClosure blk(mr, cl); |
3766 | 2327 heap_region_iterate(&blk); |
678 | 2328 if (do_perm) { |
2329 perm_gen()->oop_iterate(cl); | |
2330 } | |
342 | 2331 } |
2332 | |
2333 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
2334 | |
2335 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
2336 ObjectClosure* _cl; | |
2337 public: | |
2338 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
2339 bool doHeapRegion(HeapRegion* r) { | |
2340 if (! r->continuesHumongous()) { | |
2341 r->object_iterate(_cl); | |
2342 } | |
2343 return false; | |
2344 } | |
2345 }; | |
2346 | |
678 | 2347 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 2348 IterateObjectClosureRegionClosure blk(cl); |
3766 | 2349 heap_region_iterate(&blk); |
678 | 2350 if (do_perm) { |
2351 perm_gen()->object_iterate(cl); | |
2352 } | |
342 | 2353 } |
2354 | |
2355 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
2356 // FIXME: is this right? | |
2357 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
2358 } | |
2359 | |
2360 // Calls a SpaceClosure on a HeapRegion. | |
2361 | |
2362 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
2363 SpaceClosure* _cl; | |
2364 public: | |
2365 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
2366 bool doHeapRegion(HeapRegion* r) { | |
2367 _cl->do_space(r); | |
2368 return false; | |
2369 } | |
2370 }; | |
2371 | |
2372 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
2373 SpaceClosureRegionClosure blk(cl); | |
3766 | 2374 heap_region_iterate(&blk); |
342 | 2375 } |
2376 | |
3766 | 2377 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const { |
2378 _hrs.iterate(cl); | |
342 | 2379 } |
2380 | |
2381 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
3766 | 2382 HeapRegionClosure* cl) const { |
2383 _hrs.iterate_from(r, cl); | |
342 | 2384 } |
2385 | |
2386 void | |
2387 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
2388 int worker, | |
2389 jint claim_value) { | |
355 | 2390 const size_t regions = n_regions(); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2391 const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1); |
355 | 2392 // try to spread out the starting points of the workers |
2393 const size_t start_index = regions / worker_num * (size_t) worker; | |
2394 | |
2395 // each worker will actually look at all regions | |
2396 for (size_t count = 0; count < regions; ++count) { | |
2397 const size_t index = (start_index + count) % regions; | |
2398 assert(0 <= index && index < regions, "sanity"); | |
2399 HeapRegion* r = region_at(index); | |
2400 // we'll ignore "continues humongous" regions (we'll process them | |
2401 // when we come across their corresponding "start humongous" | |
2402 // region) and regions already claimed | |
2403 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
2404 continue; | |
2405 } | |
2406 // OK, try to claim it | |
342 | 2407 if (r->claimHeapRegion(claim_value)) { |
355 | 2408 // success! |
2409 assert(!r->continuesHumongous(), "sanity"); | |
2410 if (r->startsHumongous()) { | |
2411 // If the region is "starts humongous" we'll iterate over its | |
2412 // "continues humongous" first; in fact we'll do them | |
2413 // first. The order is important. In on case, calling the | |
2414 // closure on the "starts humongous" region might de-allocate | |
2415 // and clear all its "continues humongous" regions and, as a | |
2416 // result, we might end up processing them twice. So, we'll do | |
2417 // them first (notice: most closures will ignore them anyway) and | |
2418 // then we'll do the "starts humongous" region. | |
2419 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
2420 HeapRegion* chr = region_at(ch_index); | |
2421 | |
2422 // if the region has already been claimed or it's not | |
2423 // "continues humongous" we're done | |
2424 if (chr->claim_value() == claim_value || | |
2425 !chr->continuesHumongous()) { | |
2426 break; | |
2427 } | |
2428 | |
2429 // Noone should have claimed it directly. We can given | |
2430 // that we claimed its "starts humongous" region. | |
2431 assert(chr->claim_value() != claim_value, "sanity"); | |
2432 assert(chr->humongous_start_region() == r, "sanity"); | |
2433 | |
2434 if (chr->claimHeapRegion(claim_value)) { | |
2435 // we should always be able to claim it; noone else should | |
2436 // be trying to claim this region | |
2437 | |
2438 bool res2 = cl->doHeapRegion(chr); | |
2439 assert(!res2, "Should not abort"); | |
2440 | |
2441 // Right now, this holds (i.e., no closure that actually | |
2442 // does something with "continues humongous" regions | |
2443 // clears them). We might have to weaken it in the future, | |
2444 // but let's leave these two asserts here for extra safety. | |
2445 assert(chr->continuesHumongous(), "should still be the case"); | |
2446 assert(chr->humongous_start_region() == r, "sanity"); | |
2447 } else { | |
2448 guarantee(false, "we should not reach here"); | |
2449 } | |
2450 } | |
2451 } | |
2452 | |
2453 assert(!r->continuesHumongous(), "sanity"); | |
2454 bool res = cl->doHeapRegion(r); | |
2455 assert(!res, "Should not abort"); | |
2456 } | |
2457 } | |
2458 } | |
2459 | |
390 | 2460 class ResetClaimValuesClosure: public HeapRegionClosure { |
2461 public: | |
2462 bool doHeapRegion(HeapRegion* r) { | |
2463 r->set_claim_value(HeapRegion::InitialClaimValue); | |
2464 return false; | |
2465 } | |
2466 }; | |
2467 | |
2468 void | |
2469 G1CollectedHeap::reset_heap_region_claim_values() { | |
2470 ResetClaimValuesClosure blk; | |
2471 heap_region_iterate(&blk); | |
2472 } | |
2473 | |
355 | 2474 #ifdef ASSERT |
2475 // This checks whether all regions in the heap have the correct claim | |
2476 // value. I also piggy-backed on this a check to ensure that the | |
2477 // humongous_start_region() information on "continues humongous" | |
2478 // regions is correct. | |
2479 | |
2480 class CheckClaimValuesClosure : public HeapRegionClosure { | |
2481 private: | |
2482 jint _claim_value; | |
2483 size_t _failures; | |
2484 HeapRegion* _sh_region; | |
2485 public: | |
2486 CheckClaimValuesClosure(jint claim_value) : | |
2487 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } | |
2488 bool doHeapRegion(HeapRegion* r) { | |
2489 if (r->claim_value() != _claim_value) { | |
2490 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
2491 "claim value = %d, should be %d", | |
2492 r->bottom(), r->end(), r->claim_value(), | |
2493 _claim_value); | |
2494 ++_failures; | |
2495 } | |
2496 if (!r->isHumongous()) { | |
2497 _sh_region = NULL; | |
2498 } else if (r->startsHumongous()) { | |
2499 _sh_region = r; | |
2500 } else if (r->continuesHumongous()) { | |
2501 if (r->humongous_start_region() != _sh_region) { | |
2502 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
2503 "HS = "PTR_FORMAT", should be "PTR_FORMAT, | |
2504 r->bottom(), r->end(), | |
2505 r->humongous_start_region(), | |
2506 _sh_region); | |
2507 ++_failures; | |
342 | 2508 } |
2509 } | |
355 | 2510 return false; |
2511 } | |
2512 size_t failures() { | |
2513 return _failures; | |
2514 } | |
2515 }; | |
2516 | |
2517 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
2518 CheckClaimValuesClosure cl(claim_value); | |
2519 heap_region_iterate(&cl); | |
2520 return cl.failures() == 0; | |
2521 } | |
2522 #endif // ASSERT | |
342 | 2523 |
2524 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2525 HeapRegion* r = g1_policy()->collection_set(); | |
2526 while (r != NULL) { | |
2527 HeapRegion* next = r->next_in_collection_set(); | |
2528 if (cl->doHeapRegion(r)) { | |
2529 cl->incomplete(); | |
2530 return; | |
2531 } | |
2532 r = next; | |
2533 } | |
2534 } | |
2535 | |
2536 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
2537 HeapRegionClosure *cl) { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2538 if (r == NULL) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2539 // The CSet is empty so there's nothing to do. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2540 return; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2541 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2542 |
342 | 2543 assert(r->in_collection_set(), |
2544 "Start region must be a member of the collection set."); | |
2545 HeapRegion* cur = r; | |
2546 while (cur != NULL) { | |
2547 HeapRegion* next = cur->next_in_collection_set(); | |
2548 if (cl->doHeapRegion(cur) && false) { | |
2549 cl->incomplete(); | |
2550 return; | |
2551 } | |
2552 cur = next; | |
2553 } | |
2554 cur = g1_policy()->collection_set(); | |
2555 while (cur != r) { | |
2556 HeapRegion* next = cur->next_in_collection_set(); | |
2557 if (cl->doHeapRegion(cur) && false) { | |
2558 cl->incomplete(); | |
2559 return; | |
2560 } | |
2561 cur = next; | |
2562 } | |
2563 } | |
2564 | |
2565 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
3766 | 2566 return n_regions() > 0 ? region_at(0) : NULL; |
342 | 2567 } |
2568 | |
2569 | |
2570 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2571 Space* res = heap_region_containing(addr); | |
2572 if (res == NULL) | |
2573 res = perm_gen()->space_containing(addr); | |
2574 return res; | |
2575 } | |
2576 | |
2577 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2578 Space* sp = space_containing(addr); | |
2579 if (sp != NULL) { | |
2580 return sp->block_start(addr); | |
2581 } | |
2582 return NULL; | |
2583 } | |
2584 | |
2585 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
2586 Space* sp = space_containing(addr); | |
2587 assert(sp != NULL, "block_size of address outside of heap"); | |
2588 return sp->block_size(addr); | |
2589 } | |
2590 | |
2591 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2592 Space* sp = space_containing(addr); | |
2593 return sp->block_is_obj(addr); | |
2594 } | |
2595 | |
2596 bool G1CollectedHeap::supports_tlab_allocation() const { | |
2597 return true; | |
2598 } | |
2599 | |
2600 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
2601 return HeapRegion::GrainBytes; | |
2602 } | |
2603 | |
2604 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
2605 // Return the remaining space in the cur alloc region, but not less than | |
2606 // the min TLAB size. | |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2607 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2608 // Also, this value can be at most the humongous object threshold, |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2609 // since we can't allow tlabs to grow big enough to accomodate |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2610 // humongous objects. |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2611 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2612 HeapRegion* hr = _mutator_alloc_region.get(); |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2613 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2614 if (hr == NULL) { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2615 return max_tlab_size; |
342 | 2616 } else { |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2617 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size); |
342 | 2618 } |
2619 } | |
2620 | |
2621 size_t G1CollectedHeap::max_capacity() const { | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
2622 return _g1_reserved.byte_size(); |
342 | 2623 } |
2624 | |
2625 jlong G1CollectedHeap::millis_since_last_gc() { | |
2626 // assert(false, "NYI"); | |
2627 return 0; | |
2628 } | |
2629 | |
2630 void G1CollectedHeap::prepare_for_verify() { | |
2631 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2632 ensure_parsability(false); | |
2633 } | |
2634 g1_rem_set()->prepare_for_verify(); | |
2635 } | |
2636 | |
2637 class VerifyLivenessOopClosure: public OopClosure { | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2638 G1CollectedHeap* _g1h; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2639 VerifyOption _vo; |
342 | 2640 public: |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2641 VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo): |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2642 _g1h(g1h), _vo(vo) |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2643 { } |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2644 void do_oop(narrowOop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2645 void do_oop( oop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2646 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2647 template <class T> void do_oop_work(T *p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2648 oop obj = oopDesc::load_decode_heap_oop(p); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2649 guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2650 "Dead object referenced by a not dead object"); |
342 | 2651 } |
2652 }; | |
2653 | |
2654 class VerifyObjsInRegionClosure: public ObjectClosure { | |
811 | 2655 private: |
342 | 2656 G1CollectedHeap* _g1h; |
2657 size_t _live_bytes; | |
2658 HeapRegion *_hr; | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2659 VerifyOption _vo; |
342 | 2660 public: |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2661 // _vo == UsePrevMarking -> use "prev" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2662 // _vo == UseNextMarking -> use "next" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2663 // _vo == UseMarkWord -> use mark word from object header. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2664 VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo) |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2665 : _live_bytes(0), _hr(hr), _vo(vo) { |
342 | 2666 _g1h = G1CollectedHeap::heap(); |
2667 } | |
2668 void do_object(oop o) { | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2669 VerifyLivenessOopClosure isLive(_g1h, _vo); |
342 | 2670 assert(o != NULL, "Huh?"); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2671 if (!_g1h->is_obj_dead_cond(o, _vo)) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2672 // If the object is alive according to the mark word, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2673 // then verify that the marking information agrees. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2674 // Note we can't verify the contra-positive of the |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2675 // above: if the object is dead (according to the mark |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2676 // word), it may not be marked, or may have been marked |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2677 // but has since became dead, or may have been allocated |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2678 // since the last marking. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2679 if (_vo == VerifyOption_G1UseMarkWord) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2680 guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch"); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2681 } |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2682 |
342 | 2683 o->oop_iterate(&isLive); |
1389
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2684 if (!_hr->obj_allocated_since_prev_marking(o)) { |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2685 size_t obj_size = o->size(); // Make sure we don't overflow |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2686 _live_bytes += (obj_size * HeapWordSize); |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2687 } |
342 | 2688 } |
2689 } | |
2690 size_t live_bytes() { return _live_bytes; } | |
2691 }; | |
2692 | |
2693 class PrintObjsInRegionClosure : public ObjectClosure { | |
2694 HeapRegion *_hr; | |
2695 G1CollectedHeap *_g1; | |
2696 public: | |
2697 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2698 _g1 = G1CollectedHeap::heap(); | |
2699 }; | |
2700 | |
2701 void do_object(oop o) { | |
2702 if (o != NULL) { | |
2703 HeapWord *start = (HeapWord *) o; | |
2704 size_t word_sz = o->size(); | |
2705 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2706 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2707 (void*) o, word_sz, | |
2708 _g1->isMarkedPrev(o), | |
2709 _g1->isMarkedNext(o), | |
2710 _hr->obj_allocated_since_prev_marking(o)); | |
2711 HeapWord *end = start + word_sz; | |
2712 HeapWord *cur; | |
2713 int *val; | |
2714 for (cur = start; cur < end; cur++) { | |
2715 val = (int *) cur; | |
2716 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2717 } | |
2718 } | |
2719 } | |
2720 }; | |
2721 | |
2722 class VerifyRegionClosure: public HeapRegionClosure { | |
811 | 2723 private: |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2724 bool _allow_dirty; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2725 bool _par; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2726 VerifyOption _vo; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2727 bool _failures; |
811 | 2728 public: |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2729 // _vo == UsePrevMarking -> use "prev" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2730 // _vo == UseNextMarking -> use "next" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2731 // _vo == UseMarkWord -> use mark word from object header. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2732 VerifyRegionClosure(bool allow_dirty, bool par, VerifyOption vo) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2733 : _allow_dirty(allow_dirty), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2734 _par(par), |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2735 _vo(vo), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2736 _failures(false) {} |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2737 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2738 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2739 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2740 } |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2741 |
342 | 2742 bool doHeapRegion(HeapRegion* r) { |
390 | 2743 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2744 "Should be unclaimed at verify points."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2745 if (!r->continuesHumongous()) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2746 bool failures = false; |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2747 r->verify(_allow_dirty, _vo, &failures); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2748 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2749 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2750 } else { |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2751 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2752 r->object_iterate(¬_dead_yet_cl); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2753 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2754 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2755 "max_live_bytes "SIZE_FORMAT" " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2756 "< calculated "SIZE_FORMAT, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2757 r->bottom(), r->end(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2758 r->max_live_bytes(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2759 not_dead_yet_cl.live_bytes()); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2760 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2761 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2762 } |
342 | 2763 } |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2764 return false; // stop the region iteration if we hit a failure |
342 | 2765 } |
2766 }; | |
2767 | |
2768 class VerifyRootsClosure: public OopsInGenClosure { | |
2769 private: | |
2770 G1CollectedHeap* _g1h; | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2771 VerifyOption _vo; |
342 | 2772 bool _failures; |
2773 public: | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2774 // _vo == UsePrevMarking -> use "prev" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2775 // _vo == UseNextMarking -> use "next" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2776 // _vo == UseMarkWord -> use mark word from object header. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2777 VerifyRootsClosure(VerifyOption vo) : |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2778 _g1h(G1CollectedHeap::heap()), |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2779 _vo(vo), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2780 _failures(false) { } |
342 | 2781 |
2782 bool failures() { return _failures; } | |
2783 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2784 template <class T> void do_oop_nv(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2785 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2786 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2787 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2788 if (_g1h->is_obj_dead_cond(obj, _vo)) { |
342 | 2789 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2790 "points to dead obj "PTR_FORMAT, p, (void*) obj); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2791 if (_vo == VerifyOption_G1UseMarkWord) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2792 gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark())); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2793 } |
342 | 2794 obj->print_on(gclog_or_tty); |
2795 _failures = true; | |
2796 } | |
2797 } | |
2798 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2799 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2800 void do_oop(oop* p) { do_oop_nv(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2801 void do_oop(narrowOop* p) { do_oop_nv(p); } |
342 | 2802 }; |
2803 | |
390 | 2804 // This is the task used for parallel heap verification. |
2805 | |
2806 class G1ParVerifyTask: public AbstractGangTask { | |
2807 private: | |
2808 G1CollectedHeap* _g1h; | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2809 bool _allow_dirty; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2810 VerifyOption _vo; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2811 bool _failures; |
390 | 2812 |
2813 public: | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2814 // _vo == UsePrevMarking -> use "prev" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2815 // _vo == UseNextMarking -> use "next" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2816 // _vo == UseMarkWord -> use mark word from object header. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2817 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, VerifyOption vo) : |
390 | 2818 AbstractGangTask("Parallel verify task"), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2819 _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2820 _allow_dirty(allow_dirty), |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2821 _vo(vo), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2822 _failures(false) { } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2823 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2824 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2825 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2826 } |
390 | 2827 |
2828 void work(int worker_i) { | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2829 HandleMark hm; |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2830 VerifyRegionClosure blk(_allow_dirty, true, _vo); |
390 | 2831 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
2832 HeapRegion::ParVerifyClaimValue); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2833 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2834 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2835 } |
390 | 2836 } |
2837 }; | |
2838 | |
342 | 2839 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2840 verify(allow_dirty, silent, VerifyOption_G1UsePrevMarking); |
811 | 2841 } |
2842 | |
2843 void G1CollectedHeap::verify(bool allow_dirty, | |
2844 bool silent, | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2845 VerifyOption vo) { |
342 | 2846 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
3293
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2847 if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); } |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2848 VerifyRootsClosure rootsCl(vo); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2849 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2850 |
3293
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2851 // We apply the relevant closures to all the oops in the |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2852 // system dictionary, the string table and the code cache. |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2853 const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2854 |
3293
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2855 process_strong_roots(true, // activate StrongRootsScope |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2856 true, // we set "collecting perm gen" to true, |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2857 // so we don't reset the dirty cards in the perm gen. |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2858 SharedHeap::ScanningOption(so), // roots scanning options |
342 | 2859 &rootsCl, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2860 &blobsCl, |
342 | 2861 &rootsCl); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2862 |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2863 // If we're verifying after the marking phase of a Full GC then we can't |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2864 // treat the perm gen as roots into the G1 heap. Some of the objects in |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2865 // the perm gen may be dead and hence not marked. If one of these dead |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2866 // objects is considered to be a root then we may end up with a false |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2867 // "Root location <x> points to dead ob <y>" failure. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2868 if (vo != VerifyOption_G1UseMarkWord) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2869 // Since we used "collecting_perm_gen" == true above, we will not have |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2870 // checked the refs from perm into the G1-collected heap. We check those |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2871 // references explicitly below. Whether the relevant cards are dirty |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2872 // is checked further below in the rem set verification. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2873 if (!silent) { gclog_or_tty->print("Permgen roots "); } |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2874 perm_gen()->oop_iterate(&rootsCl); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2875 } |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2876 bool failures = rootsCl.failures(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2877 |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2878 if (vo != VerifyOption_G1UseMarkWord) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2879 // If we're verifying during a full GC then the region sets |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2880 // will have been torn down at the start of the GC. Therefore |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2881 // verifying the region sets will fail. So we only verify |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2882 // the region sets when not in a full GC. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2883 if (!silent) { gclog_or_tty->print("HeapRegionSets "); } |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2884 verify_region_sets(); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2885 } |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2886 |
2152 | 2887 if (!silent) { gclog_or_tty->print("HeapRegions "); } |
390 | 2888 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2889 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2890 "sanity check"); | |
2891 | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2892 G1ParVerifyTask task(this, allow_dirty, vo); |
390 | 2893 int n_workers = workers()->total_workers(); |
2894 set_par_threads(n_workers); | |
2895 workers()->run_task(&task); | |
2896 set_par_threads(0); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2897 if (task.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2898 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2899 } |
390 | 2900 |
2901 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2902 "sanity check"); | |
2903 | |
2904 reset_heap_region_claim_values(); | |
2905 | |
2906 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2907 "sanity check"); | |
2908 } else { | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2909 VerifyRegionClosure blk(allow_dirty, false, vo); |
3766 | 2910 heap_region_iterate(&blk); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2911 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2912 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2913 } |
390 | 2914 } |
2152 | 2915 if (!silent) gclog_or_tty->print("RemSet "); |
342 | 2916 rem_set()->verify(); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2917 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2918 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2919 gclog_or_tty->print_cr("Heap:"); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2920 print_on(gclog_or_tty, true /* extended */); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2921 gclog_or_tty->print_cr(""); |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2922 #ifndef PRODUCT |
1044 | 2923 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
1388 | 2924 concurrent_mark()->print_reachable("at-verification-failure", |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2925 vo, false /* all */); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2926 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2927 #endif |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2928 gclog_or_tty->flush(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2929 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2930 guarantee(!failures, "there should not have been any failures"); |
342 | 2931 } else { |
2932 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2933 } | |
2934 } | |
2935 | |
2936 class PrintRegionClosure: public HeapRegionClosure { | |
2937 outputStream* _st; | |
2938 public: | |
2939 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2940 bool doHeapRegion(HeapRegion* r) { | |
2941 r->print_on(_st); | |
2942 return false; | |
2943 } | |
2944 }; | |
2945 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2946 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 2947 |
2948 void G1CollectedHeap::print_on(outputStream* st) const { | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2949 print_on(st, PrintHeapAtGCExtended); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2950 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2951 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2952 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2953 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2954 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2955 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2956 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2957 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2958 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2959 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2960 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2961 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2962 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2963 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2964 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2965 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2966 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2967 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2968 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2969 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2970 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2971 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2972 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2973 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2974 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2975 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2976 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2977 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 2978 PrintRegionClosure blk(st); |
3766 | 2979 heap_region_iterate(&blk); |
342 | 2980 } |
2981 | |
2982 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2983 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1019 | 2984 workers()->print_worker_threads_on(st); |
2985 } | |
2986 _cmThread->print_on(st); | |
342 | 2987 st->cr(); |
1019 | 2988 _cm->print_worker_threads_on(st); |
2989 _cg1r->print_worker_threads_on(st); | |
342 | 2990 st->cr(); |
2991 } | |
2992 | |
2993 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2994 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 2995 workers()->threads_do(tc); |
2996 } | |
2997 tc->do_thread(_cmThread); | |
794 | 2998 _cg1r->threads_do(tc); |
342 | 2999 } |
3000 | |
3001 void G1CollectedHeap::print_tracing_info() const { | |
3002 // We'll overload this to mean "trace GC pause statistics." | |
3003 if (TraceGen0Time || TraceGen1Time) { | |
3004 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
3005 // to that. | |
3006 g1_policy()->print_tracing_info(); | |
3007 } | |
751 | 3008 if (G1SummarizeRSetStats) { |
342 | 3009 g1_rem_set()->print_summary_info(); |
3010 } | |
1282 | 3011 if (G1SummarizeConcMark) { |
342 | 3012 concurrent_mark()->print_summary_info(); |
3013 } | |
3014 g1_policy()->print_yg_surv_rate_info(); | |
3015 SpecializationStats::print(); | |
3016 } | |
3017 | |
3777
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3018 #ifndef PRODUCT |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3019 // Helpful for debugging RSet issues. |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3020 |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3021 class PrintRSetsClosure : public HeapRegionClosure { |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3022 private: |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3023 const char* _msg; |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3024 size_t _occupied_sum; |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3025 |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3026 public: |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3027 bool doHeapRegion(HeapRegion* r) { |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3028 HeapRegionRemSet* hrrs = r->rem_set(); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3029 size_t occupied = hrrs->occupied(); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3030 _occupied_sum += occupied; |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3031 |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3032 gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT, |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3033 HR_FORMAT_PARAMS(r)); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3034 if (occupied == 0) { |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3035 gclog_or_tty->print_cr(" RSet is empty"); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3036 } else { |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3037 hrrs->print(); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3038 } |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3039 gclog_or_tty->print_cr("----------"); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3040 return false; |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3041 } |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3042 |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3043 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) { |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3044 gclog_or_tty->cr(); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3045 gclog_or_tty->print_cr("========================================"); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3046 gclog_or_tty->print_cr(msg); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3047 gclog_or_tty->cr(); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3048 } |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3049 |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3050 ~PrintRSetsClosure() { |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3051 gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3052 gclog_or_tty->print_cr("========================================"); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3053 gclog_or_tty->cr(); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3054 } |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3055 }; |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3056 |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3057 void G1CollectedHeap::print_cset_rsets() { |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3058 PrintRSetsClosure cl("Printing CSet RSets"); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3059 collection_set_iterate(&cl); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3060 } |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3061 |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3062 void G1CollectedHeap::print_all_rsets() { |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3063 PrintRSetsClosure cl("Printing All RSets");; |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3064 heap_region_iterate(&cl); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3065 } |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3066 #endif // PRODUCT |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3067 |
342 | 3068 G1CollectedHeap* G1CollectedHeap::heap() { |
3069 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
3070 "not a garbage-first heap"); | |
3071 return _g1h; | |
3072 } | |
3073 | |
3074 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
3075 // always_do_update_barrier = false; |
342 | 3076 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
3077 // Call allocation profiler | |
3078 AllocationProfiler::iterate_since_last_gc(); | |
3079 // Fill TLAB's and such | |
3080 ensure_parsability(true); | |
3081 } | |
3082 | |
3083 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
3084 // FIXME: what is this about? | |
3085 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
3086 // is set. | |
3087 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
3088 "derived pointer present")); | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
3089 // always_do_update_barrier = true; |
342 | 3090 } |
3091 | |
1973 | 3092 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, |
3093 unsigned int gc_count_before, | |
3094 bool* succeeded) { | |
3095 assert_heap_not_locked_and_not_at_safepoint(); | |
342 | 3096 g1_policy()->record_stop_world_start(); |
1973 | 3097 VM_G1IncCollectionPause op(gc_count_before, |
3098 word_size, | |
3099 false, /* should_initiate_conc_mark */ | |
3100 g1_policy()->max_pause_time_ms(), | |
3101 GCCause::_g1_inc_collection_pause); | |
3102 VMThread::execute(&op); | |
3103 | |
3104 HeapWord* result = op.result(); | |
3105 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded(); | |
3106 assert(result == NULL || ret_succeeded, | |
3107 "the result should be NULL if the VM did not succeed"); | |
3108 *succeeded = ret_succeeded; | |
3109 | |
3110 assert_heap_not_locked(); | |
3111 return result; | |
342 | 3112 } |
3113 | |
3114 void | |
3115 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3116 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3117 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3118 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3119 CGC_lock->notify(); |
342 | 3120 } |
3121 } | |
3122 | |
3123 void G1CollectedHeap::do_sync_mark() { | |
3124 _cm->checkpointRootsInitial(); | |
3125 _cm->markFromRoots(); | |
3126 _cm->checkpointRootsFinal(false); | |
3127 } | |
3128 | |
3129 // <NEW PREDICTION> | |
3130 | |
3131 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
3132 bool young) { | |
3133 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
3134 } | |
3135 | |
3136 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
3137 predicted_time_ms) { | |
3138 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
3139 } | |
3140 | |
3141 size_t G1CollectedHeap::pending_card_num() { | |
3142 size_t extra_cards = 0; | |
3143 JavaThread *curr = Threads::first(); | |
3144 while (curr != NULL) { | |
3145 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
3146 extra_cards += dcq.size(); | |
3147 curr = curr->next(); | |
3148 } | |
3149 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3150 size_t buffer_size = dcqs.buffer_size(); | |
3151 size_t buffer_num = dcqs.completed_buffers_num(); | |
3152 return buffer_size * buffer_num + extra_cards; | |
3153 } | |
3154 | |
3155 size_t G1CollectedHeap::max_pending_card_num() { | |
3156 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3157 size_t buffer_size = dcqs.buffer_size(); | |
3158 size_t buffer_num = dcqs.completed_buffers_num(); | |
3159 int thread_num = Threads::number_of_threads(); | |
3160 return (buffer_num + thread_num) * buffer_size; | |
3161 } | |
3162 | |
3163 size_t G1CollectedHeap::cards_scanned() { | |
1861 | 3164 return g1_rem_set()->cardsScanned(); |
342 | 3165 } |
3166 | |
3167 void | |
3168 G1CollectedHeap::setup_surviving_young_words() { | |
3169 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
3170 size_t array_length = g1_policy()->young_cset_length(); | |
3171 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3172 if (_surviving_young_words == NULL) { | |
3173 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
3174 "Not enough space for young surv words summary."); | |
3175 } | |
3176 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3177 #ifdef ASSERT |
342 | 3178 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3179 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3180 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3181 #endif // !ASSERT |
342 | 3182 } |
3183 | |
3184 void | |
3185 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
3186 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3187 size_t array_length = g1_policy()->young_cset_length(); | |
3188 for (size_t i = 0; i < array_length; ++i) | |
3189 _surviving_young_words[i] += surv_young_words[i]; | |
3190 } | |
3191 | |
3192 void | |
3193 G1CollectedHeap::cleanup_surviving_young_words() { | |
3194 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
3195 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
3196 _surviving_young_words = NULL; | |
3197 } | |
3198 | |
3199 // </NEW PREDICTION> | |
3200 | |
3777
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3201 #ifdef ASSERT |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3202 class VerifyCSetClosure: public HeapRegionClosure { |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3203 public: |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3204 bool doHeapRegion(HeapRegion* hr) { |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3205 // Here we check that the CSet region's RSet is ready for parallel |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3206 // iteration. The fields that we'll verify are only manipulated |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3207 // when the region is part of a CSet and is collected. Afterwards, |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3208 // we reset these fields when we clear the region's RSet (when the |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3209 // region is freed) so they are ready when the region is |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3210 // re-allocated. The only exception to this is if there's an |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3211 // evacuation failure and instead of freeing the region we leave |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3212 // it in the heap. In that case, we reset these fields during |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3213 // evacuation failure handling. |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3214 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification"); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3215 |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3216 // Here's a good place to add any other checks we'd like to |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3217 // perform on CSet regions. |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3218 return false; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3219 } |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3220 }; |
3777
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3221 #endif // ASSERT |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3222 |
1709 | 3223 #if TASKQUEUE_STATS |
3224 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { | |
3225 st->print_raw_cr("GC Task Stats"); | |
3226 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); | |
3227 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); | |
3228 } | |
3229 | |
3230 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { | |
3231 print_taskqueue_stats_hdr(st); | |
3232 | |
3233 TaskQueueStats totals; | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3234 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3235 for (int i = 0; i < n; ++i) { |
3236 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); | |
3237 totals += task_queue(i)->stats; | |
3238 } | |
3239 st->print_raw("tot "); totals.print(st); st->cr(); | |
3240 | |
3241 DEBUG_ONLY(totals.verify()); | |
3242 } | |
3243 | |
3244 void G1CollectedHeap::reset_taskqueue_stats() { | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3245 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3246 for (int i = 0; i < n; ++i) { |
3247 task_queue(i)->stats.reset(); | |
3248 } | |
3249 } | |
3250 #endif // TASKQUEUE_STATS | |
3251 | |
1973 | 3252 bool |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3253 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
2152 | 3254 assert_at_safepoint(true /* should_be_vm_thread */); |
3255 guarantee(!is_gc_active(), "collection is not reentrant"); | |
3256 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3257 if (GC_locker::check_active_before_gc()) { |
1973 | 3258 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3259 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3260 |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2039
diff
changeset
|
3261 SvcGCMarker sgcm(SvcGCMarker::MINOR); |
2039
7c5250dbd584
6896624: G1: hotspot:::gc and hotspot:::mem-pool-gc probes are not fired
tonyp
parents:
2038
diff
changeset
|
3262 ResourceMark rm; |
7c5250dbd584
6896624: G1: hotspot:::gc and hotspot:::mem-pool-gc probes are not fired
tonyp
parents:
2038
diff
changeset
|
3263 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3264 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3265 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3266 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3267 |
2152 | 3268 verify_region_sets_optional(); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3269 verify_dirty_young_regions(); |
2152 | 3270 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3271 { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3272 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3273 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3274 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3275 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3276 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3277 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3278 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3279 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3280 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3281 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3282 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3283 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3284 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3285 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3286 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3287 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3288 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3289 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3290 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3291 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3292 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3293 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3294 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3295 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3296 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3297 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3298 |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
3299 TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); |
3356
78542e2b5e35
7036199: Adding a notification to the implementation of GarbageCollectorMXBeans
fparain
parents:
3323
diff
changeset
|
3300 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3301 |
2361 | 3302 // If the secondary_free_list is not empty, append it to the |
3303 // free_list. No need to wait for the cleanup operation to finish; | |
3304 // the region allocation code will check the secondary_free_list | |
3305 // and wait if necessary. If the G1StressConcRegionFreeing flag is | |
3306 // set, skip this step so that the region allocation code has to | |
3307 // get entries from the secondary_free_list. | |
2152 | 3308 if (!G1StressConcRegionFreeing) { |
2361 | 3309 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 3310 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3311 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3312 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3313 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3314 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3315 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3316 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3317 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3318 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3319 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3320 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3321 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3322 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3323 increment_total_collections(false /* full gc */); |
342 | 3324 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3325 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3326 HandleMark hm; // Discard invalid handles created during verification |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3327 gclog_or_tty->print(" VerifyBeforeGC:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3328 prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3329 Universe::verify(/* allow dirty */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3330 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3331 /* option */ VerifyOption_G1UsePrevMarking); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3332 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3333 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3334 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3335 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3336 |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3337 // Please see comment in G1CollectedHeap::ref_processing_init() |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3338 // to see how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3339 // |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3340 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3341 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3342 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3343 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3344 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3345 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3346 // of the collection set!). |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3347 release_mutator_alloc_region(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3348 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3349 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3350 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3351 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3352 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3353 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3354 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3355 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3356 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3357 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3358 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3359 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3360 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3361 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3362 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3363 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3364 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3365 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3366 #endif // YOUNG_LIST_VERBOSE |
342 | 3367 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3368 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3369 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3370 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3371 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3372 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3373 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3374 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3375 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3376 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3377 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3378 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3379 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3380 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3381 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3382 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3383 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3384 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3385 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3386 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3387 |
3378
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3388 if (mark_in_progress()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3389 concurrent_mark()->newCSet(); |
3378
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3390 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3391 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3392 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3393 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3394 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3395 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3396 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3397 |
1707 | 3398 g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3399 |
3378
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3400 // We have chosen the complete collection set. If marking is |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3401 // active then, we clear the region fields of any of the |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3402 // concurrent marking tasks whose region fields point into |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3403 // the collection set as these values will become stale. This |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3404 // will cause the owning marking threads to claim a new region |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3405 // when marking restarts. |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3406 if (mark_in_progress()) { |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3407 concurrent_mark()->reset_active_task_region_fields_in_cset(); |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3408 } |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3409 |
3777
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3410 #ifdef ASSERT |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3411 VerifyCSetClosure cl; |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3412 collection_set_iterate(&cl); |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
3413 #endif // ASSERT |
1707 | 3414 |
3415 setup_surviving_young_words(); | |
3416 | |
3417 // Set up the gc allocation regions. | |
3418 get_gc_alloc_regions(); | |
3419 | |
3420 // Actually do the work... | |
3421 evacuate_collection_set(); | |
3422 | |
3423 free_collection_set(g1_policy()->collection_set()); | |
3424 g1_policy()->clear_collection_set(); | |
3425 | |
3426 cleanup_surviving_young_words(); | |
3427 | |
3428 // Start a new incremental collection set for the next pause. | |
3429 g1_policy()->start_incremental_cset_building(); | |
3430 | |
3431 // Clear the _cset_fast_test bitmap in anticipation of adding | |
3432 // regions to the incremental collection set for the next | |
3433 // evacuation pause. | |
3434 clear_cset_fast_test(); | |
3435 | |
3436 if (g1_policy()->in_young_gc_mode()) { | |
3437 _young_list->reset_sampled_info(); | |
3438 | |
3439 // Don't check the whole heap at this point as the | |
3440 // GC alloc regions from this pause have been tagged | |
3441 // as survivors and moved on to the survivor list. | |
3442 // Survivor regions will fail the !is_young() check. | |
3443 assert(check_young_list_empty(false /* check_heap */), | |
3444 "young list should be empty"); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3445 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3446 #if YOUNG_LIST_VERBOSE |
1707 | 3447 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
3448 _young_list->print(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3449 #endif // YOUNG_LIST_VERBOSE |
342 | 3450 |
1707 | 3451 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3452 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3453 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3454 |
1707 | 3455 _young_list->reset_auxilary_lists(); |
342 | 3456 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3457 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3458 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3459 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3460 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3461 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3462 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3463 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3464 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3465 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3466 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3467 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3468 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3469 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3470 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3471 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3472 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3473 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3474 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3475 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3476 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3477 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3478 } |
342 | 3479 |
3285
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
3480 allocate_dummy_regions(); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
3481 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3482 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3483 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3484 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3485 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3486 #endif // YOUNG_LIST_VERBOSE |
342 | 3487 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3488 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3489 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3490 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3491 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3492 g1_policy()->record_pause_time_ms(pause_time_ms); |
1707 | 3493 g1_policy()->record_collection_pause_end(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3494 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3495 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3496 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3497 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3498 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3499 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3500 prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3501 Universe::verify(/* allow dirty */ true, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3502 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3503 /* option */ VerifyOption_G1UsePrevMarking); |
342 | 3504 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3505 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3506 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3507 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3508 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3509 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3510 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3511 size_t bytes_before = capacity(); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3512 if (!expand(expand_bytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3513 // We failed to expand the heap so let's verify that |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3514 // committed/uncommitted amount match the backing store |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3515 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3516 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3517 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3518 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3519 } |
3764
053d84a76d3d
7032531: G1: enhance GC logging to include more accurate eden / survivor size transitions
tonyp
parents:
3378
diff
changeset
|
3520 // We have to do this after we decide whether to expand the heap or not. |
053d84a76d3d
7032531: G1: enhance GC logging to include more accurate eden / survivor size transitions
tonyp
parents:
3378
diff
changeset
|
3521 g1_policy()->print_heap_transition(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3522 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3523 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3524 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3525 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3526 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3527 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3528 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3529 #endif |
342 | 3530 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3531 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3532 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3533 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3534 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3535 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3536 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3537 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3538 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3539 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3540 |
3766 | 3541 _hrs.verify_optional(); |
2152 | 3542 verify_region_sets_optional(); |
3543 | |
1709 | 3544 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
3545 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | |
3546 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3547 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3548 Universe::print_heap_after_gc(); |
342 | 3549 } |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
3550 g1mm()->update_counters(); |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
3551 |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3552 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3553 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3554 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3555 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3556 } |
1973 | 3557 |
3558 return true; | |
342 | 3559 } |
3560 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3561 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3562 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3563 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3564 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3565 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3566 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3567 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3568 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3569 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3570 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3571 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3572 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3573 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3574 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3575 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3576 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3577 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3578 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3579 void G1CollectedHeap::init_mutator_alloc_region() { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3580 assert(_mutator_alloc_region.get() == NULL, "pre-condition"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3581 _mutator_alloc_region.init(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3582 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3583 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3584 void G1CollectedHeap::release_mutator_alloc_region() { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3585 _mutator_alloc_region.release(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3586 assert(_mutator_alloc_region.get() == NULL, "post-condition"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3587 } |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3588 |
342 | 3589 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
3590 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 3591 // make sure we don't call set_gc_alloc_region() multiple times on |
3592 // the same region | |
3593 assert(r == NULL || !r->is_gc_alloc_region(), | |
3594 "shouldn't already be a GC alloc region"); | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3595 assert(r == NULL || !r->isHumongous(), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3596 "humongous regions shouldn't be used as GC alloc regions"); |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3597 |
342 | 3598 HeapWord* original_top = NULL; |
3599 if (r != NULL) | |
3600 original_top = r->top(); | |
3601 | |
3602 // We will want to record the used space in r as being there before gc. | |
3603 // One we install it as a GC alloc region it's eligible for allocation. | |
3604 // So record it now and use it later. | |
3605 size_t r_used = 0; | |
3606 if (r != NULL) { | |
3607 r_used = r->used(); | |
3608 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
3609 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 3610 // need to take the lock to guard against two threads calling |
3611 // get_gc_alloc_region concurrently (very unlikely but...) | |
3612 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3613 r->save_marks(); | |
3614 } | |
3615 } | |
3616 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
3617 _gc_alloc_regions[purpose] = r; | |
3618 if (old_alloc_region != NULL) { | |
3619 // Replace aliases too. | |
3620 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3621 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
3622 _gc_alloc_regions[ap] = r; | |
3623 } | |
3624 } | |
3625 } | |
3626 if (r != NULL) { | |
3627 push_gc_alloc_region(r); | |
3628 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
3629 // We are using a region as a GC alloc region after it has been used | |
3630 // as a mutator allocation region during the current marking cycle. | |
3631 // The mutator-allocated objects are currently implicitly marked, but | |
3632 // when we move hr->next_top_at_mark_start() forward at the the end | |
3633 // of the GC pause, they won't be. We therefore mark all objects in | |
3634 // the "gap". We do this object-by-object, since marking densely | |
3635 // does not currently work right with marking bitmap iteration. This | |
3636 // means we rely on TLAB filling at the start of pauses, and no | |
3637 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
3638 // to fix the marking bitmap iteration. | |
3639 HeapWord* curhw = r->next_top_at_mark_start(); | |
3640 HeapWord* t = original_top; | |
3641 | |
3642 while (curhw < t) { | |
3643 oop cur = (oop)curhw; | |
3644 // We'll assume parallel for generality. This is rare code. | |
3645 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
3646 curhw = curhw + cur->size(); | |
3647 } | |
3648 assert(curhw == t, "Should have parsed correctly."); | |
3649 } | |
3650 if (G1PolicyVerbose > 1) { | |
3651 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
3652 "for survivors:", r->bottom(), original_top, r->end()); | |
3653 r->print(); | |
3654 } | |
3655 g1_policy()->record_before_bytes(r_used); | |
3656 } | |
3657 } | |
3658 | |
3659 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
3660 assert(Thread::current()->is_VM_thread() || | |
2152 | 3661 FreeList_lock->owned_by_self(), "Precondition"); |
342 | 3662 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), |
3663 "Precondition."); | |
3664 hr->set_is_gc_alloc_region(true); | |
3665 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
3666 _gc_alloc_region_list = hr; | |
3667 } | |
3668 | |
3669 #ifdef G1_DEBUG | |
3670 class FindGCAllocRegion: public HeapRegionClosure { | |
3671 public: | |
3672 bool doHeapRegion(HeapRegion* r) { | |
3673 if (r->is_gc_alloc_region()) { | |
3766 | 3674 gclog_or_tty->print_cr("Region "HR_FORMAT" is still a GC alloc region", |
3675 HR_FORMAT_PARAMS(r)); | |
342 | 3676 } |
3677 return false; | |
3678 } | |
3679 }; | |
3680 #endif // G1_DEBUG | |
3681 | |
3682 void G1CollectedHeap::forget_alloc_region_list() { | |
2152 | 3683 assert_at_safepoint(true /* should_be_vm_thread */); |
342 | 3684 while (_gc_alloc_region_list != NULL) { |
3685 HeapRegion* r = _gc_alloc_region_list; | |
3686 assert(r->is_gc_alloc_region(), "Invariant."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3687 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3688 // newly allocated data in order to be able to apply deferred updates |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3689 // before the GC is done for verification purposes (i.e to allow |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3690 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3691 // collection. |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3692 r->ContiguousSpace::set_saved_mark(); |
342 | 3693 _gc_alloc_region_list = r->next_gc_alloc_region(); |
3694 r->set_next_gc_alloc_region(NULL); | |
3695 r->set_is_gc_alloc_region(false); | |
545 | 3696 if (r->is_survivor()) { |
3697 if (r->is_empty()) { | |
3698 r->set_not_young(); | |
3699 } else { | |
3700 _young_list->add_survivor_region(r); | |
3701 } | |
3702 } | |
342 | 3703 } |
3704 #ifdef G1_DEBUG | |
3705 FindGCAllocRegion fa; | |
3706 heap_region_iterate(&fa); | |
3707 #endif // G1_DEBUG | |
3708 } | |
3709 | |
3710 | |
3711 bool G1CollectedHeap::check_gc_alloc_regions() { | |
3712 // TODO: allocation regions check | |
3713 return true; | |
3714 } | |
3715 | |
3716 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 3717 // First, let's check that the GC alloc region list is empty (it should) |
3718 assert(_gc_alloc_region_list == NULL, "invariant"); | |
3719 | |
342 | 3720 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 3721 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3722 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
636 | 3723 |
342 | 3724 // Create new GC alloc regions. |
636 | 3725 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
3726 _retained_gc_alloc_regions[ap] = NULL; | |
3727 | |
3728 if (alloc_region != NULL) { | |
3729 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
3730 | |
3731 // let's make sure that the GC alloc region is not tagged as such | |
3732 // outside a GC operation | |
3733 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
3734 | |
3735 if (alloc_region->in_collection_set() || | |
3736 alloc_region->top() == alloc_region->end() || | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3737 alloc_region->top() == alloc_region->bottom() || |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3738 alloc_region->isHumongous()) { |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3739 // we will discard the current GC alloc region if |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3740 // * it's in the collection set (it can happen!), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3741 // * it's already full (no point in using it), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3742 // * it's empty (this means that it was emptied during |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3743 // a cleanup and it should be on the free list now), or |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3744 // * it's humongous (this means that it was emptied |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3745 // during a cleanup and was added to the free list, but |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3746 // has been subseqently used to allocate a humongous |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3747 // object that may be less than the region size). |
636 | 3748 |
3749 alloc_region = NULL; | |
3750 } | |
3751 } | |
3752 | |
3753 if (alloc_region == NULL) { | |
3754 // we will get a new GC alloc region | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3755 alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3756 } else { |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3757 // the region was retained from the last collection |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3758 ++_gc_alloc_region_counts[ap]; |
1388 | 3759 if (G1PrintHeapRegions) { |
3766 | 3760 gclog_or_tty->print_cr("new alloc region "HR_FORMAT, |
3761 HR_FORMAT_PARAMS(alloc_region)); | |
1388 | 3762 } |
342 | 3763 } |
636 | 3764 |
342 | 3765 if (alloc_region != NULL) { |
636 | 3766 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 3767 set_gc_alloc_region(ap, alloc_region); |
3768 } | |
636 | 3769 |
3770 assert(_gc_alloc_regions[ap] == NULL || | |
3771 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
3772 "the GC alloc region should be tagged as such"); | |
3773 assert(_gc_alloc_regions[ap] == NULL || | |
3774 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
3775 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 3776 } |
3777 // Set alternative regions for allocation purposes that have reached | |
636 | 3778 // their limit. |
342 | 3779 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
3780 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
3781 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
3782 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
3783 } | |
3784 } | |
3785 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3786 } | |
3787 | |
636 | 3788 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 3789 // We keep a separate list of all regions that have been alloc regions in |
636 | 3790 // the current collection pause. Forget that now. This method will |
3791 // untag the GC alloc regions and tear down the GC alloc region | |
3792 // list. It's desirable that no regions are tagged as GC alloc | |
3793 // outside GCs. | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3794 |
342 | 3795 forget_alloc_region_list(); |
3796 | |
3797 // The current alloc regions contain objs that have survived | |
3798 // collection. Make them no longer GC alloc regions. | |
3799 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3800 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 3801 _retained_gc_alloc_regions[ap] = NULL; |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3802 _gc_alloc_region_counts[ap] = 0; |
636 | 3803 |
3804 if (r != NULL) { | |
3805 // we retain nothing on _gc_alloc_regions between GCs | |
3806 set_gc_alloc_region(ap, NULL); | |
3807 | |
3808 if (r->is_empty()) { | |
2152 | 3809 // We didn't actually allocate anything in it; let's just put |
3810 // it back on the free list. | |
2432
455328d90876
7029458: G1: Add newly-reclaimed regions to the beginning of the region free list, not the end
tonyp
parents:
2369
diff
changeset
|
3811 _free_list.add_as_head(r); |
636 | 3812 } else if (_retain_gc_alloc_region[ap] && !totally) { |
3813 // retain it so that we can use it at the beginning of the next GC | |
3814 _retained_gc_alloc_regions[ap] = r; | |
342 | 3815 } |
3816 } | |
636 | 3817 } |
3818 } | |
3819 | |
3820 #ifndef PRODUCT | |
3821 // Useful for debugging | |
3822 | |
3823 void G1CollectedHeap::print_gc_alloc_regions() { | |
3824 gclog_or_tty->print_cr("GC alloc regions"); | |
3825 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3826 HeapRegion* r = _gc_alloc_regions[ap]; | |
3827 if (r == NULL) { | |
3828 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3829 } else { | |
3830 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3831 ap, r->bottom(), r->used()); | |
3832 } | |
3833 } | |
3834 } | |
3835 #endif // PRODUCT | |
342 | 3836 |
3837 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3838 _drain_in_progress = false; | |
3839 set_evac_failure_closure(cl); | |
3840 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3841 } | |
3842 | |
3843 void G1CollectedHeap::finalize_for_evac_failure() { | |
3844 assert(_evac_failure_scan_stack != NULL && | |
3845 _evac_failure_scan_stack->length() == 0, | |
3846 "Postcondition"); | |
3847 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3848 delete _evac_failure_scan_stack; |
342 | 3849 _evac_failure_scan_stack = NULL; |
3850 } | |
3851 | |
3852 | |
3853 | |
3854 // *** Sequential G1 Evacuation | |
3855 | |
3856 class G1IsAliveClosure: public BoolObjectClosure { | |
3857 G1CollectedHeap* _g1; | |
3858 public: | |
3859 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3860 void do_object(oop p) { assert(false, "Do not call."); } | |
3861 bool do_object_b(oop p) { | |
3862 // It is reachable if it is outside the collection set, or is inside | |
3863 // and forwarded. | |
3864 | |
3865 #ifdef G1_DEBUG | |
3866 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3867 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3868 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3869 #endif // G1_DEBUG | |
3870 | |
3871 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3872 } | |
3873 }; | |
3874 | |
3875 class G1KeepAliveClosure: public OopClosure { | |
3876 G1CollectedHeap* _g1; | |
3877 public: | |
3878 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3879 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3880 void do_oop( oop* p) { |
342 | 3881 oop obj = *p; |
3882 #ifdef G1_DEBUG | |
3883 if (PrintGC && Verbose) { | |
3884 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3885 p, (void*) obj, (void*) *p); | |
3886 } | |
3887 #endif // G1_DEBUG | |
3888 | |
3889 if (_g1->obj_in_cs(obj)) { | |
3890 assert( obj->is_forwarded(), "invariant" ); | |
3891 *p = obj->forwardee(); | |
3892 #ifdef G1_DEBUG | |
3893 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3894 (void*) obj, (void*) *p); | |
3895 #endif // G1_DEBUG | |
3896 } | |
3897 } | |
3898 }; | |
3899 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3900 class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3901 private: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3902 G1CollectedHeap* _g1; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3903 DirtyCardQueue *_dcq; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3904 CardTableModRefBS* _ct_bs; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3905 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3906 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3907 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3908 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3909 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3910 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3911 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3912 template <class T> void do_oop_work(T* p) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3913 assert(_from->is_in_reserved(p), "paranoia"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3914 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3915 !_from->is_survivor()) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3916 size_t card_index = _ct_bs->index_for(p); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3917 if (_ct_bs->mark_card_deferred(card_index)) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3918 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3919 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3920 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3921 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3922 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3923 |
342 | 3924 class RemoveSelfPointerClosure: public ObjectClosure { |
3925 private: | |
3926 G1CollectedHeap* _g1; | |
3927 ConcurrentMark* _cm; | |
3928 HeapRegion* _hr; | |
3929 size_t _prev_marked_bytes; | |
3930 size_t _next_marked_bytes; | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3931 OopsInHeapRegionClosure *_cl; |
342 | 3932 public: |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3933 RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr, |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3934 OopsInHeapRegionClosure* cl) : |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3935 _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3936 _next_marked_bytes(0), _cl(cl) {} |
342 | 3937 |
3938 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3939 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3940 | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3941 // <original comment> |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3942 // The original idea here was to coalesce evacuated and dead objects. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3943 // However that caused complications with the block offset table (BOT). |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3944 // In particular if there were two TLABs, one of them partially refined. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3945 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3946 // The BOT entries of the unrefined part of TLAB_2 point to the start |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3947 // of TLAB_2. If the last object of the TLAB_1 and the first object |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3948 // of TLAB_2 are coalesced, then the cards of the unrefined part |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3949 // would point into middle of the filler object. |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3950 // The current approach is to not coalesce and leave the BOT contents intact. |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3951 // </original comment> |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3952 // |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3953 // We now reset the BOT when we start the object iteration over the |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3954 // region and refine its entries for every object we come across. So |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3955 // the above comment is not really relevant and we should be able |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3956 // to coalesce dead objects if we want to. |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3957 void do_object(oop obj) { |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3958 HeapWord* obj_addr = (HeapWord*) obj; |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3959 assert(_hr->is_in(obj_addr), "sanity"); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3960 size_t obj_size = obj->size(); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3961 _hr->update_bot_for_object(obj_addr, obj_size); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3962 if (obj->is_forwarded() && obj->forwardee() == obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3963 // The object failed to move. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3964 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3965 _cm->markPrev(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3966 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3967 _prev_marked_bytes += (obj_size * HeapWordSize); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3968 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3969 _cm->markAndGrayObjectIfNecessary(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3970 } |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3971 obj->set_mark(markOopDesc::prototype()); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3972 // While we were processing RSet buffers during the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3973 // collection, we actually didn't scan any cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3974 // collection set, since we didn't want to update remebered |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3975 // sets with entries that point into the collection set, given |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3976 // that live objects fromthe collection set are about to move |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3977 // and such entries will be stale very soon. This change also |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3978 // dealt with a reliability issue which involved scanning a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3979 // card in the collection set and coming across an array that |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3980 // was being chunked and looking malformed. The problem is |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3981 // that, if evacuation fails, we might have remembered set |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3982 // entries missing given that we skipped cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3983 // collection set. So, we'll recreate such entries now. |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3984 obj->oop_iterate(_cl); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3985 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3986 } else { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3987 // The object has been either evacuated or is dead. Fill it with a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3988 // dummy object. |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3989 MemRegion mr((HeapWord*)obj, obj_size); |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3990 CollectedHeap::fill_with_object(mr); |
342 | 3991 _cm->clearRangeBothMaps(mr); |
3992 } | |
3993 } | |
3994 }; | |
3995 | |
3996 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
1705 | 3997 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3998 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3999 UpdateRSetDeferred deferred_update(_g1h, &dcq); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4000 OopsInHeapRegionClosure *cl; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4001 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4002 cl = &deferred_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4003 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4004 cl = &immediate_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4005 } |
342 | 4006 HeapRegion* cur = g1_policy()->collection_set(); |
4007 while (cur != NULL) { | |
4008 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4009 assert(!cur->isHumongous(), "sanity"); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4010 |
342 | 4011 if (cur->evacuation_failed()) { |
4012 assert(cur->in_collection_set(), "bad CS"); | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4013 RemoveSelfPointerClosure rspc(_g1h, cur, cl); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4014 |
3777
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
4015 // In the common case we make sure that this is done when the |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
4016 // region is freed so that it is "ready-to-go" when it's |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
4017 // re-allocated. However, when evacuation failure happens, a |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
4018 // region will remain in the heap and might ultimately be added |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
4019 // to a CSet in the future. So we have to be careful here and |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
4020 // make sure the region's RSet is ready for parallel iteration |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
4021 // whenever this might be required in the future. |
e8b0b0392037
7046182: G1: remove unnecessary iterations over the collection set
tonyp
parents:
3774
diff
changeset
|
4022 cur->rem_set()->reset_for_par_iteration(); |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4023 cur->reset_bot(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4024 cl->set_region(cur); |
342 | 4025 cur->object_iterate(&rspc); |
4026 | |
4027 // A number of manipulations to make the TAMS be the current top, | |
4028 // and the marked bytes be the ones observed in the iteration. | |
4029 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
4030 // The comments below are the postconditions achieved by the | |
4031 // calls. Note especially the last such condition, which says that | |
4032 // the count of marked bytes has been properly restored. | |
4033 cur->note_start_of_marking(false); | |
4034 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
4035 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
4036 // _next_marked_bytes == prev_marked_bytes. | |
4037 cur->note_end_of_marking(); | |
4038 // _prev_top_at_mark_start == top(), | |
4039 // _prev_marked_bytes == prev_marked_bytes | |
4040 } | |
4041 // If there is no mark in progress, we modified the _next variables | |
4042 // above needlessly, but harmlessly. | |
4043 if (_g1h->mark_in_progress()) { | |
4044 cur->note_start_of_marking(false); | |
4045 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
4046 // _next_marked_bytes == next_marked_bytes. | |
4047 } | |
4048 | |
4049 // Now make sure the region has the right index in the sorted array. | |
4050 g1_policy()->note_change_in_marked_bytes(cur); | |
4051 } | |
4052 cur = cur->next_in_collection_set(); | |
4053 } | |
4054 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
4055 | |
4056 // Now restore saved marks, if any. | |
4057 if (_objs_with_preserved_marks != NULL) { | |
4058 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
4059 guarantee(_objs_with_preserved_marks->length() == | |
4060 _preserved_marks_of_objs->length(), "Both or none."); | |
4061 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
4062 oop obj = _objs_with_preserved_marks->at(i); | |
4063 markOop m = _preserved_marks_of_objs->at(i); | |
4064 obj->set_mark(m); | |
4065 } | |
4066 // Delete the preserved marks growable arrays (allocated on the C heap). | |
4067 delete _objs_with_preserved_marks; | |
4068 delete _preserved_marks_of_objs; | |
4069 _objs_with_preserved_marks = NULL; | |
4070 _preserved_marks_of_objs = NULL; | |
4071 } | |
4072 } | |
4073 | |
4074 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
4075 _evac_failure_scan_stack->push(obj); | |
4076 } | |
4077 | |
4078 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
4079 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
4080 | |
4081 while (_evac_failure_scan_stack->length() > 0) { | |
4082 oop obj = _evac_failure_scan_stack->pop(); | |
4083 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
4084 obj->oop_iterate_backwards(_evac_failure_closure); | |
4085 } | |
4086 } | |
4087 | |
4088 oop | |
4089 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
4090 oop old) { | |
3323
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4091 assert(obj_in_cs(old), |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4092 err_msg("obj: "PTR_FORMAT" should still be in the CSet", |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4093 (HeapWord*) old)); |
342 | 4094 markOop m = old->mark(); |
4095 oop forward_ptr = old->forward_to_atomic(old); | |
4096 if (forward_ptr == NULL) { | |
4097 // Forward-to-self succeeded. | |
4098 if (_evac_failure_closure != cl) { | |
4099 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
4100 assert(!_drain_in_progress, | |
4101 "Should only be true while someone holds the lock."); | |
4102 // Set the global evac-failure closure to the current thread's. | |
4103 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
4104 set_evac_failure_closure(cl); | |
4105 // Now do the common part. | |
4106 handle_evacuation_failure_common(old, m); | |
4107 // Reset to NULL. | |
4108 set_evac_failure_closure(NULL); | |
4109 } else { | |
4110 // The lock is already held, and this is recursive. | |
4111 assert(_drain_in_progress, "This should only be the recursive case."); | |
4112 handle_evacuation_failure_common(old, m); | |
4113 } | |
4114 return old; | |
4115 } else { | |
3323
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4116 // Forward-to-self failed. Either someone else managed to allocate |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4117 // space for this object (old != forward_ptr) or they beat us in |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4118 // self-forwarding it (old == forward_ptr). |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4119 assert(old == forward_ptr || !obj_in_cs(forward_ptr), |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4120 err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" " |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4121 "should not be in the CSet", |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4122 (HeapWord*) old, (HeapWord*) forward_ptr)); |
342 | 4123 return forward_ptr; |
4124 } | |
4125 } | |
4126 | |
4127 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
4128 set_evacuation_failed(true); | |
4129 | |
4130 preserve_mark_if_necessary(old, m); | |
4131 | |
4132 HeapRegion* r = heap_region_containing(old); | |
4133 if (!r->evacuation_failed()) { | |
4134 r->set_evacuation_failed(true); | |
1282 | 4135 if (G1PrintHeapRegions) { |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
4136 gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " |
342 | 4137 "["PTR_FORMAT","PTR_FORMAT")\n", |
4138 r, r->bottom(), r->end()); | |
4139 } | |
4140 } | |
4141 | |
4142 push_on_evac_failure_scan_stack(old); | |
4143 | |
4144 if (!_drain_in_progress) { | |
4145 // prevent recursion in copy_to_survivor_space() | |
4146 _drain_in_progress = true; | |
4147 drain_evac_failure_scan_stack(); | |
4148 _drain_in_progress = false; | |
4149 } | |
4150 } | |
4151 | |
4152 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
2038
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4153 assert(evacuation_failed(), "Oversaving!"); |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4154 // We want to call the "for_promotion_failure" version only in the |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4155 // case of a promotion failure. |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4156 if (m->must_be_preserved_for_promotion_failure(obj)) { |
342 | 4157 if (_objs_with_preserved_marks == NULL) { |
4158 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
4159 _objs_with_preserved_marks = | |
4160 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
4161 _preserved_marks_of_objs = | |
4162 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
4163 } | |
4164 _objs_with_preserved_marks->push(obj); | |
4165 _preserved_marks_of_objs->push(m); | |
4166 } | |
4167 } | |
4168 | |
4169 // *** Parallel G1 Evacuation | |
4170 | |
4171 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
4172 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4173 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4174 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4175 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4176 |
342 | 4177 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
4178 // let the caller handle alloc failure | |
4179 if (alloc_region == NULL) return NULL; | |
4180 | |
4181 HeapWord* block = alloc_region->par_allocate(word_size); | |
4182 if (block == NULL) { | |
4183 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
4184 } | |
4185 return block; | |
4186 } | |
4187 | |
545 | 4188 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
4189 bool par) { | |
4190 // Another thread might have obtained alloc_region for the given | |
4191 // purpose, and might be attempting to allocate in it, and might | |
4192 // succeed. Therefore, we can't do the "finalization" stuff on the | |
4193 // region below until we're sure the last allocation has happened. | |
4194 // We ensure this by allocating the remaining space with a garbage | |
4195 // object. | |
4196 if (par) par_allocate_remaining_space(alloc_region); | |
4197 // Now we can do the post-GC stuff on the region. | |
4198 alloc_region->note_end_of_copying(); | |
4199 g1_policy()->record_after_bytes(alloc_region->used()); | |
4200 } | |
4201 | |
342 | 4202 HeapWord* |
4203 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
4204 HeapRegion* alloc_region, | |
4205 bool par, | |
4206 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4207 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4208 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4209 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4210 |
2152 | 4211 // We need to make sure we serialize calls to this method. Given |
4212 // that the FreeList_lock guards accesses to the free_list anyway, | |
4213 // and we need to potentially remove a region from it, we'll use it | |
4214 // to protect the whole call. | |
4215 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | |
4216 | |
342 | 4217 HeapWord* block = NULL; |
4218 // In the parallel case, a previous thread to obtain the lock may have | |
4219 // already assigned a new gc_alloc_region. | |
4220 if (alloc_region != _gc_alloc_regions[purpose]) { | |
4221 assert(par, "But should only happen in parallel case."); | |
4222 alloc_region = _gc_alloc_regions[purpose]; | |
4223 if (alloc_region == NULL) return NULL; | |
4224 block = alloc_region->par_allocate(word_size); | |
4225 if (block != NULL) return block; | |
4226 // Otherwise, continue; this new region is empty, too. | |
4227 } | |
4228 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 4229 retire_alloc_region(alloc_region, par); |
342 | 4230 |
4231 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
4232 // Cannot allocate more regions for the given purpose. | |
4233 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
4234 // Is there an alternative? | |
4235 if (purpose != alt_purpose) { | |
4236 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
4237 // Has not the alternative region been aliased? | |
545 | 4238 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 4239 // Try to allocate in the alternative region. |
4240 if (par) { | |
4241 block = alt_region->par_allocate(word_size); | |
4242 } else { | |
4243 block = alt_region->allocate(word_size); | |
4244 } | |
4245 // Make an alias. | |
4246 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 4247 if (block != NULL) { |
4248 return block; | |
4249 } | |
4250 retire_alloc_region(alt_region, par); | |
342 | 4251 } |
4252 // Both the allocation region and the alternative one are full | |
4253 // and aliased, replace them with a new allocation region. | |
4254 purpose = alt_purpose; | |
4255 } else { | |
4256 set_gc_alloc_region(purpose, NULL); | |
4257 return NULL; | |
4258 } | |
4259 } | |
4260 | |
4261 // Now allocate a new region for allocation. | |
2152 | 4262 alloc_region = new_gc_alloc_region(purpose, word_size); |
342 | 4263 |
4264 // let the caller handle alloc failure | |
4265 if (alloc_region != NULL) { | |
4266 | |
4267 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
4268 assert(alloc_region->saved_mark_at_top(), | |
4269 "Mark should have been saved already."); | |
4270 // This must be done last: once it's installed, other regions may | |
4271 // allocate in it (without holding the lock.) | |
4272 set_gc_alloc_region(purpose, alloc_region); | |
4273 | |
4274 if (par) { | |
4275 block = alloc_region->par_allocate(word_size); | |
4276 } else { | |
4277 block = alloc_region->allocate(word_size); | |
4278 } | |
4279 // Caller handles alloc failure. | |
4280 } else { | |
4281 // This sets other apis using the same old alloc region to NULL, also. | |
4282 set_gc_alloc_region(purpose, NULL); | |
4283 } | |
4284 return block; // May be NULL. | |
4285 } | |
4286 | |
4287 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
4288 HeapWord* block = NULL; | |
4289 size_t free_words; | |
4290 do { | |
4291 free_words = r->free()/HeapWordSize; | |
4292 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
4293 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 4294 // Otherwise, try to claim it. |
4295 block = r->par_allocate(free_words); | |
4296 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
4297 fill_with_object(block, free_words); |
342 | 4298 } |
4299 | |
4300 #ifndef PRODUCT | |
4301 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
4302 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
4303 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
4304 return true; | |
4305 } | |
4306 #endif // PRODUCT | |
4307 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4308 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4309 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4310 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4311 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4312 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4313 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4314 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4315 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4316 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4317 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4318 _age_table(false), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4319 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4320 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4321 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4322 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4323 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4324 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4325 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4326 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4327 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4328 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4329 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4330 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4331 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4332 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4333 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4334 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4335 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4336 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4337 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4338 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4339 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4340 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4341 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4342 } |
342 | 4343 |
1709 | 4344 void |
4345 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) | |
4346 { | |
4347 st->print_raw_cr("GC Termination Stats"); | |
4348 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" | |
4349 " ------waste (KiB)------"); | |
4350 st->print_raw_cr("thr ms ms % ms % attempts" | |
4351 " total alloc undo"); | |
4352 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" | |
4353 " ------- ------- -------"); | |
4354 } | |
4355 | |
4356 void | |
4357 G1ParScanThreadState::print_termination_stats(int i, | |
4358 outputStream* const st) const | |
4359 { | |
4360 const double elapsed_ms = elapsed_time() * 1000.0; | |
4361 const double s_roots_ms = strong_roots_time() * 1000.0; | |
4362 const double term_ms = term_time() * 1000.0; | |
4363 st->print_cr("%3d %9.2f %9.2f %6.2f " | |
4364 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " | |
4365 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), | |
4366 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, | |
4367 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), | |
4368 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, | |
4369 alloc_buffer_waste() * HeapWordSize / K, | |
4370 undo_waste() * HeapWordSize / K); | |
4371 } | |
4372 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4373 #ifdef ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4374 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4375 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4376 assert(UseCompressedOops, "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4377 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4378 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4379 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4380 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4381 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4382 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4383 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4384 bool G1ParScanThreadState::verify_ref(oop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4385 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4386 if (has_partial_array_mask(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4387 // Must be in the collection set--it's already been copied. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4388 oop p = clear_partial_array_mask(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4389 assert(_g1h->obj_in_cs(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4390 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4391 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4392 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4393 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4394 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4395 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4396 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4397 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4398 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4399 bool G1ParScanThreadState::verify_task(StarTask ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4400 if (ref.is_narrow()) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4401 return verify_ref((narrowOop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4402 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4403 return verify_ref((oop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4404 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4405 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4406 #endif // ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4407 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4408 void G1ParScanThreadState::trim_queue() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4409 StarTask ref; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4410 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4411 // Drain the overflow stack first, so other threads can steal. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4412 while (refs()->pop_overflow(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4413 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4414 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4415 while (refs()->pop_local(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4416 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4417 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4418 } while (!refs()->is_empty()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4419 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4420 |
342 | 4421 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
4422 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
4423 _par_scan_state(par_scan_state) { } | |
4424 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4425 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 4426 // This is called _after_ do_oop_work has been called, hence after |
4427 // the object has been relocated to its new location and *p points | |
4428 // to its new location. | |
4429 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4430 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4431 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4432 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4433 HeapWord* addr = (HeapWord*)obj; |
3323
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4434 if (_g1->is_in_g1_reserved(addr)) { |
342 | 4435 _cm->grayRoot(oop(addr)); |
3323
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4436 } |
342 | 4437 } |
4438 } | |
4439 | |
4440 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
4441 size_t word_sz = old->size(); | |
4442 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
4443 // +1 to make the -1 indexes valid... | |
4444 int young_index = from_region->young_index_in_cset()+1; | |
4445 assert( (from_region->is_young() && young_index > 0) || | |
4446 (!from_region->is_young() && young_index == 0), "invariant" ); | |
4447 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
4448 markOop m = old->mark(); | |
545 | 4449 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
4450 : m->age(); | |
4451 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 4452 word_sz); |
4453 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
4454 oop obj = oop(obj_ptr); | |
4455 | |
4456 if (obj_ptr == NULL) { | |
4457 // This will either forward-to-self, or detect that someone else has | |
4458 // installed a forwarding pointer. | |
4459 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4460 return _g1->handle_evacuation_failure_par(cl, old); | |
4461 } | |
4462 | |
526 | 4463 // We're going to allocate linearly, so might as well prefetch ahead. |
4464 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
4465 | |
342 | 4466 oop forward_ptr = old->forward_to_atomic(obj); |
4467 if (forward_ptr == NULL) { | |
4468 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 4469 if (g1p->track_object_age(alloc_purpose)) { |
4470 // We could simply do obj->incr_age(). However, this causes a | |
4471 // performance issue. obj->incr_age() will first check whether | |
4472 // the object has a displaced mark by checking its mark word; | |
4473 // getting the mark word from the new location of the object | |
4474 // stalls. So, given that we already have the mark word and we | |
4475 // are about to install it anyway, it's better to increase the | |
4476 // age on the mark word, when the object does not have a | |
4477 // displaced mark word. We're not expecting many objects to have | |
4478 // a displaced marked word, so that case is not optimized | |
4479 // further (it could be...) and we simply call obj->incr_age(). | |
4480 | |
4481 if (m->has_displaced_mark_helper()) { | |
4482 // in this case, we have to install the mark word first, | |
4483 // otherwise obj looks to be forwarded (the old mark word, | |
4484 // which contains the forward pointer, was copied) | |
4485 obj->set_mark(m); | |
4486 obj->incr_age(); | |
4487 } else { | |
4488 m = m->incr_age(); | |
545 | 4489 obj->set_mark(m); |
526 | 4490 } |
545 | 4491 _par_scan_state->age_table()->add(obj, word_sz); |
4492 } else { | |
4493 obj->set_mark(m); | |
526 | 4494 } |
4495 | |
342 | 4496 // preserve "next" mark bit |
4497 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
4498 if (!use_local_bitmaps || | |
4499 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
4500 // if we couldn't mark it on the local bitmap (this happens when | |
4501 // the object was not allocated in the GCLab), we have to bite | |
4502 // the bullet and do the standard parallel mark | |
4503 _cm->markAndGrayObjectIfNecessary(obj); | |
4504 } | |
4505 #if 1 | |
4506 if (_g1->isMarkedNext(old)) { | |
4507 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
4508 } | |
4509 #endif | |
4510 } | |
4511 | |
4512 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
4513 surv_young_words[young_index] += word_sz; | |
4514 | |
4515 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
4516 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4517 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4518 _par_scan_state->push_on_queue(old_p); |
342 | 4519 } else { |
526 | 4520 // No point in using the slower heap_region_containing() method, |
4521 // given that we know obj is in the heap. | |
4522 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 4523 obj->oop_iterate_backwards(_scanner); |
4524 } | |
4525 } else { | |
4526 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
4527 obj = forward_ptr; | |
4528 } | |
4529 return obj; | |
4530 } | |
4531 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
// Evacuation-time oop-field closure, specialized at compile time:
//   do_gen_barrier    - apply the generational barrier (par_do_barrier) after
//                       the field has been processed
//   barrier           - G1BarrierRS (RSet scanning) or G1BarrierEvac
//                       (evacuation-time updates); selects when update_rs runs
//   do_mark_forwardee - template parameter not referenced in this method
// Loads the oop at *p; if the referent is in the collection set it is
// forwarded (copying it via copy_to_survivor_space if no forwardee exists
// yet) and the field is updated to point at the new location.
template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
template <class T>
void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee>
::do_oop_work(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
  assert(barrier != G1BarrierRS || obj != NULL,
         "Precondition: G1BarrierRS implies obj is nonNull");

  // here the null check is implicit in the cset_fast_test() test
  if (_g1->in_cset_fast_test(obj)) {
    if (obj->is_forwarded()) {
      // Another worker (or an earlier visit) already copied this object;
      // just install the forwardee into the field.
      oopDesc::encode_store_heap_oop(p, obj->forwardee());
    } else {
      // First visit: copy the object out of the collection set and store
      // the address of the copy back into the field.
      oop copy_oop = copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop(p, copy_oop);
    }
    // When scanning the RS, we only care about objs in CS.
    if (barrier == G1BarrierRS) {
      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
    }
  }

  // For G1BarrierEvac the RSet is updated for any non-NULL referent,
  // not only for those that were in the collection set above.
  if (barrier == G1BarrierEvac && obj != NULL) {
    _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  }

  if (do_gen_barrier && obj != NULL) {
    par_do_barrier(p);
  }
}
4562 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
// Explicit instantiations of the evacuation-barrier copy closure for both
// field widths: full-width oops and compressed (narrow) oops.
template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4565 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
// Processes one chunk of a partially-scanned object array. The queue entry
// is a masked pointer to the *old* (forwarded-from) array; its length field
// records how many elements have already been handed out, while the
// forwardee retains the real length. Each call scans up to
// ParGCArrayScanChunk elements of the forwardee and, if more remain,
// re-pushes the masked old pointer with an advanced length.
template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop old = clear_partial_array_mask(p);
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");

  objArrayOop obj = objArrayOop(old->forwardee());
  assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();     // elements already handed out
  int end = obj->length();                 // true length (from the forwardee)
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);        // record progress in the old copy
    // Push remainder.
    oop* old_p = set_partial_array_mask(old);
    assert(arrayOop(old)->length() < obj->length(), "Empty push?");
    _par_scan_state->push_on_queue(old_p);
  } else {
    // Restore length so that the heap remains parsable in
    // case of evacuation failure.
    arrayOop(old)->set_length(end);
  }
  _scanner.set_region(_g1->heap_region_containing_raw(obj));
  // process our set of indices (include header in first chunk)
  obj->oop_iterate_range(&_scanner, start, end);
}
4598 | |
// Per-worker closure that drains the thread's scan queue and steals work
// from other workers' queues until the termination protocol agrees that
// all queues are empty (see do_void / offer_termination below).
class G1ParEvacuateFollowersClosure : public VoidClosure {
protected:
  G1CollectedHeap* _g1h;                 // the heap being collected
  G1ParScanThreadState* _par_scan_state; // this worker's scan state
  RefToScanQueueSet* _queues;            // all workers' queues (steal targets)
  ParallelTaskTerminator* _terminator;   // coordinates worker shutdown

  G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
  RefToScanQueueSet* queues() { return _queues; }
  ParallelTaskTerminator* terminator() { return _terminator; }

public:
  G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
                                G1ParScanThreadState* par_scan_state,
                                RefToScanQueueSet* queues,
                                ParallelTaskTerminator* terminator)
    : _g1h(g1h), _par_scan_state(par_scan_state),
      _queues(queues), _terminator(terminator) {}

  void do_void();

private:
  inline bool offer_termination();
};
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4623 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4624 bool G1ParEvacuateFollowersClosure::offer_termination() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4625 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4626 pss->start_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4627 const bool res = terminator()->offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4628 pss->end_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4629 return res; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4630 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4631 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4632 void G1ParEvacuateFollowersClosure::do_void() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4633 StarTask stolen_task; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4634 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4635 pss->trim_queue(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4636 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4637 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4638 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4639 assert(pss->verify_task(stolen_task), "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4640 if (stolen_task.is_narrow()) { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4641 pss->deal_with_reference((narrowOop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4642 } else { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4643 pss->deal_with_reference((oop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4644 } |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4645 |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4646 // We've just processed a reference and we might have made |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4647 // available new entries on the queues. So we have to make sure |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4648 // we drain the queues as necessary. |
342 | 4649 pss->trim_queue(); |
4650 } | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4651 } while (!offer_termination()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4652 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4653 pss->retire_alloc_buffers(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4654 } |
342 | 4655 |
// The gang task executed by each GC worker thread during an evacuation
// pause: scans strong roots, then evacuates followers until termination.
class G1ParTask : public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  RefToScanQueueSet *_queues;
  ParallelTaskTerminator _terminator;
  int _n_workers;                  // number of workers expected to do work

  Mutex _stats_lock;               // serializes termination-stats printing
  Mutex* stats_lock() { return &_stats_lock; }

  // Number of block-offset-table cards covering the current heap capacity
  // (rounded up).
  size_t getNCards() {
    return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
      / G1BlockOffsetSharedArray::N_bytes;
  }

public:
  G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
    : AbstractGangTask("G1 collection"),
      _g1h(g1h),
      _queues(task_queues),
      _terminator(workers, _queues),
      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
      _n_workers(workers)
  {}

  RefToScanQueueSet* queues() { return _queues; }

  RefToScanQueue *work_queue(int i) {
    return queues()->queue(i);
  }

  // Entry point for worker i: root scanning followed by evacuation of
  // followers, with per-phase timing recorded in the policy.
  void work(int i) {
    if (i >= _n_workers) return; // no work needed this round

    double start_time_ms = os::elapsedTime() * 1000.0;
    _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);

    ResourceMark rm;
    HandleMark hm;

    // Per-thread scan state and the closures it drives.
    G1ParScanThreadState pss(_g1h, i);
    G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
    G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);

    pss.set_evac_closure(&scan_evac_cl);
    pss.set_evac_failure_closure(&evac_failure_cl);
    pss.set_partial_scan_closure(&partial_scan_cl);

    // Plain scanning closures ...
    G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss);
    G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss);
    G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss);
    G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);

    // ... and the scan-and-mark variants used during an initial-mark pause.
    G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
    G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss);
    G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss);

    OopsInHeapRegionClosure *scan_root_cl;
    OopsInHeapRegionClosure *scan_perm_cl;

    if (_g1h->g1_policy()->during_initial_mark_pause()) {
      scan_root_cl = &scan_mark_root_cl;
      scan_perm_cl = &scan_mark_perm_cl;
    } else {
      scan_root_cl = &only_scan_root_cl;
      scan_perm_cl = &only_scan_perm_cl;
    }

    pss.start_strong_roots();
    _g1h->g1_process_strong_roots(/* not collecting perm */ false,
                                  SharedHeap::SO_AllClasses,
                                  scan_root_cl,
                                  &push_heap_rs_cl,
                                  scan_perm_cl,
                                  i);
    pss.end_strong_roots();
    {
      // Evacuate followers; record copy time net of termination time.
      double start = os::elapsedTime();
      G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
      evac.do_void();
      double elapsed_ms = (os::elapsedTime()-start)*1000.0;
      double term_ms = pss.term_time()*1000.0;
      _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
      _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
    }
    _g1h->g1_policy()->record_thread_age_table(pss.age_table());
    _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

    // Clean up any par-expanded rem sets.
    HeapRegionRemSet::par_cleanup();

    if (ParallelGCVerbose) {
      MutexLocker x(stats_lock());
      pss.print_termination_stats(i);
    }

    assert(pss.refs()->is_empty(), "should be empty");
    double end_time_ms = os::elapsedTime() * 1000.0;
    _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
  }
};
4758 | |
4759 // *** Common G1 Evacuation Stuff | |
4760 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4761 // This method is run in a GC worker. |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4762 |
342 | 4763 void |
4764 G1CollectedHeap:: | |
4765 g1_process_strong_roots(bool collecting_perm_gen, | |
4766 SharedHeap::ScanningOption so, | |
4767 OopClosure* scan_non_heap_roots, | |
4768 OopsInHeapRegionClosure* scan_rs, | |
4769 OopsInGenClosure* scan_perm, | |
4770 int worker_i) { | |
4771 // First scan the strong roots, including the perm gen. | |
4772 double ext_roots_start = os::elapsedTime(); | |
4773 double closure_app_time_sec = 0.0; | |
4774 | |
4775 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4776 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4777 buf_scan_perm.set_generation(perm_gen()); | |
4778 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4779 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4780 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4781 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4782 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4783 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4784 collecting_perm_gen, so, |
342 | 4785 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4786 &eager_scan_code_roots, |
342 | 4787 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4788 |
342 | 4789 // Finish up any enqueued closure apps. |
4790 buf_scan_non_heap_roots.done(); | |
4791 buf_scan_perm.done(); | |
4792 double ext_roots_end = os::elapsedTime(); | |
4793 g1_policy()->reset_obj_copy_time(worker_i); | |
4794 double obj_copy_time_sec = | |
4795 buf_scan_non_heap_roots.closure_app_seconds() + | |
4796 buf_scan_perm.closure_app_seconds(); | |
4797 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4798 double ext_root_time_ms = | |
4799 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4800 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4801 | |
4802 // Scan strong roots in mark stack. | |
4803 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4804 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4805 } | |
4806 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4807 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4808 | |
4809 // XXX What should this be doing in the parallel case? | |
4810 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4811 // Now scan the complement of the collection set. | |
4812 if (scan_rs != NULL) { | |
4813 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4814 } | |
4815 // Finish with the ref_processor roots. | |
4816 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4817 // We need to treat the discovered reference lists as roots and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4818 // keep entries (which are added by the marking threads) on them |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4819 // live until they can be processed at the end of marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4820 ref_processor()->weak_oops_do(scan_non_heap_roots); |
342 | 4821 ref_processor()->oops_do(scan_non_heap_roots); |
4822 } | |
4823 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4824 _process_strong_tasks->all_tasks_completed(); | |
4825 } | |
4826 | |
4827 void | |
4828 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4829 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4830 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4831 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4832 } |
4833 | |
4834 | |
// Region closure that saves each region's current mark; always returns
// false so that the iteration visits every region (never aborts early).
class SaveMarksClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->save_marks();
    return false; // keep iterating
  }
};
4842 | |
4843 void G1CollectedHeap::save_marks() { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4844 if (!CollectedHeap::use_parallel_gc_threads()) { |
342 | 4845 SaveMarksClosure sm; |
4846 heap_region_iterate(&sm); | |
4847 } | |
4848 // We do this even in the parallel case | |
4849 perm_gen()->save_marks(); | |
4850 } | |
4851 | |
// Drives the parallel evacuation of the collection set: sets up per-pause
// state, runs G1ParTask on the worker gang (or inline when serial), then
// performs weak-root processing, evacuation-failure handling, and deferred
// remembered-set update replay. The ordering of the steps below matters.
void G1CollectedHeap::evacuate_collection_set() {
  set_evacuation_failed(false);

  g1_rem_set()->prepare_for_oops_into_collection_set_do();
  // Disable the refinement hot-card cache for the duration of the pause.
  concurrent_g1_refine()->set_use_cache(false);
  concurrent_g1_refine()->clear_hot_cache_claimed_index();

  int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
  set_par_threads(n_workers);
  G1ParTask g1_par_task(this, n_workers, _task_queues);

  init_for_evac_failure(NULL);

  rem_set()->prepare_for_younger_refs_iterate(true);

  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  double start_par = os::elapsedTime();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    // The individual threads will set their evac-failure closures.
    StrongRootsScope srs(this);
    if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
    workers()->run_task(&g1_par_task);
  } else {
    // Serial fallback: run the task body directly as worker 0.
    StrongRootsScope srs(this);
    g1_par_task.work(0);
  }

  double par_time = (os::elapsedTime() - start_par) * 1000.0;
  g1_policy()->record_par_time(par_time);
  set_par_threads(0);
  // Is this the right thing to do here? We don't save marks
  // on individual heap regions when we allocate from
  // them in parallel, so this seems like the correct place for this.
  retire_all_alloc_regions();

  // Weak root processing.
  // Note: when JSR 292 is enabled and code blobs can contain
  // non-perm oops then we will need to process the code blobs
  // here too.
  {
    G1IsAliveClosure is_alive(this);
    G1KeepAliveClosure keep_alive(this);
    JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  }
  release_gc_alloc_regions(false /* totally */);
  g1_rem_set()->cleanup_after_oops_into_collection_set_do();

  // Re-enable the hot-card cache now that the pause work is done.
  concurrent_g1_refine()->clear_hot_cache();
  concurrent_g1_refine()->set_use_cache(true);

  finalize_for_evac_failure();

  // Must do this before removing self-forwarding pointers, which clears
  // the per-region evac-failure flags.
  concurrent_mark()->complete_marking_in_collection_set();

  if (evacuation_failed()) {
    remove_self_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (to-space overflow)");
    } else if (PrintGC) {
      gclog_or_tty->print("--");
    }
  }

  if (G1DeferredRSUpdate) {
    // Replay the cards logged during the pause: re-dirty them and hand the
    // buffers over to the JavaThread dirty-card queue set for refinement.
    RedirtyLoggedCardTableEntryFastClosure redirty;
    dirty_card_queue_set().set_closure(&redirty);
    dirty_card_queue_set().apply_closure_to_all_completed_buffers();

    DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
    dcq.merge_bufferlists(&dirty_card_queue_set());
    assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
4928 | |
2173 | 4929 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr, |
2152 | 4930 size_t* pre_used, |
4931 FreeRegionList* free_list, | |
4932 HumongousRegionSet* humongous_proxy_set, | |
2173 | 4933 HRRSCleanupTask* hrrs_cleanup_task, |
2152 | 4934 bool par) { |
4935 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { | |
4936 if (hr->isHumongous()) { | |
4937 assert(hr->startsHumongous(), "we should only see starts humongous"); | |
4938 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par); | |
4939 } else { | |
4940 free_region(hr, pre_used, free_list, par); | |
342 | 4941 } |
2173 | 4942 } else { |
4943 hr->rem_set()->do_cleanup_work(hrrs_cleanup_task); | |
342 | 4944 } |
4945 } | |
4946 | |
2152 | 4947 void G1CollectedHeap::free_region(HeapRegion* hr, |
4948 size_t* pre_used, | |
4949 FreeRegionList* free_list, | |
4950 bool par) { | |
4951 assert(!hr->isHumongous(), "this is only for non-humongous regions"); | |
4952 assert(!hr->is_empty(), "the region should not be empty"); | |
4953 assert(free_list != NULL, "pre-condition"); | |
4954 | |
4955 *pre_used += hr->used(); | |
4956 hr->hr_clear(par, true /* clear_space */); | |
2432
455328d90876
7029458: G1: Add newly-reclaimed regions to the beginning of the region free list, not the end
tonyp
parents:
2369
diff
changeset
|
4957 free_list->add_as_head(hr); |
2152 | 4958 } |
4959 | |
4960 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, | |
4961 size_t* pre_used, | |
4962 FreeRegionList* free_list, | |
4963 HumongousRegionSet* humongous_proxy_set, | |
4964 bool par) { | |
4965 assert(hr->startsHumongous(), "this is only for starts humongous regions"); | |
4966 assert(free_list != NULL, "pre-condition"); | |
4967 assert(humongous_proxy_set != NULL, "pre-condition"); | |
4968 | |
4969 size_t hr_used = hr->used(); | |
4970 size_t hr_capacity = hr->capacity(); | |
4971 size_t hr_pre_used = 0; | |
4972 _humongous_set.remove_with_proxy(hr, humongous_proxy_set); | |
4973 hr->set_notHumongous(); | |
4974 free_region(hr, &hr_pre_used, free_list, par); | |
4975 | |
3766 | 4976 size_t i = hr->hrs_index() + 1; |
2152 | 4977 size_t num = 1; |
3766 | 4978 while (i < n_regions()) { |
4979 HeapRegion* curr_hr = region_at(i); | |
2152 | 4980 if (!curr_hr->continuesHumongous()) { |
4981 break; | |
4982 } | |
4983 curr_hr->set_notHumongous(); | |
4984 free_region(curr_hr, &hr_pre_used, free_list, par); | |
4985 num += 1; | |
4986 i += 1; | |
4987 } | |
4988 assert(hr_pre_used == hr_used, | |
4989 err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" " | |
4990 "should be the same", hr_pre_used, hr_used)); | |
4991 *pre_used += hr_pre_used; | |
4992 } | |
4993 | |
// Folds the results of region freeing back into the global state under the
// appropriate locks: subtracts pre_used from _summary_bytes_used, prepends
// free_list to the master free list, and applies the humongous proxy set.
// Each update is taken only when there is something to apply, and each is
// guarded by its own lock scope (locks skipped when par is false for the
// byte-count update).
void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
                                                        FreeRegionList* free_list,
                                                        HumongousRegionSet* humongous_proxy_set,
                                                        bool par) {
  if (pre_used > 0) {
    // Only a parallel caller needs the lock; NULL means "no lock".
    Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
    MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
    assert(_summary_bytes_used >= pre_used,
           err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
                   "should be >= pre_used: "SIZE_FORMAT,
                   _summary_bytes_used, pre_used));
    _summary_bytes_used -= pre_used;
  }
  if (free_list != NULL && !free_list->is_empty()) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    _free_list.add_as_head(free_list);
  }
  if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
    MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
    _humongous_set.update_from_proxy(humongous_proxy_set);
  }
}
5016 | |
5017 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
5018 while (list != NULL) { | |
5019 guarantee( list->is_young(), "invariant" ); | |
5020 | |
5021 HeapWord* bottom = list->bottom(); | |
5022 HeapWord* end = list->end(); | |
5023 MemRegion mr(bottom, end); | |
5024 ct_bs->dirty(mr); | |
5025 | |
5026 list = list->get_next_young_region(); | |
5027 } | |
5028 } | |
5029 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5030 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5031 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5032 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5033 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5034 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5035 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5036 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5037 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5038 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5039 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5040 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5041 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5042 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5043 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5044 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5045 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5046 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5047 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5048 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5049 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5050 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5051 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5052 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5053 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5054 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5055 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5056 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5057 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5058 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5059 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5060 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5061 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5062 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5063 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5064 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5065 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5066 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5067 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5068 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5069 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5070 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5071 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5072 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5073 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5074 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5075 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5076 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5077 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5078 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5079 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5080 class G1VerifyCardTableCleanup: public HeapRegionClosure { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5081 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5082 CardTableModRefBS* _ct_bs; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5083 public: |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5084 G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs) |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5085 : _g1h(g1h), _ct_bs(ct_bs) { } |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5086 virtual bool doHeapRegion(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5087 if (r->is_survivor()) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5088 _g1h->verify_dirty_region(r); |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5089 } else { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5090 _g1h->verify_not_dirty_region(r); |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5091 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5092 return false; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5093 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5094 }; |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5095 |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5096 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) { |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5097 // All of the region should be clean. |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5098 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5099 MemRegion mr(hr->bottom(), hr->end()); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5100 ct_bs->verify_not_dirty_region(mr); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5101 } |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5102 |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5103 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) { |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5104 // We cannot guarantee that [bottom(),end()] is dirty. Threads |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5105 // dirty allocated blocks as they allocate them. The thread that |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5106 // retires each region and replaces it with a new one will do a |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5107 // maximal allocation to fill in [pre_dummy_top(),end()] but will |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5108 // not dirty that area (one less thing to have to do while holding |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5109 // a lock). So we can only verify that [bottom(),pre_dummy_top()] |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5110 // is dirty. |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5111 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5112 MemRegion mr(hr->bottom(), hr->pre_dummy_top()); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5113 ct_bs->verify_dirty_region(mr); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5114 } |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5115 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5116 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5117 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5118 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5119 verify_dirty_region(hr); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5120 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5121 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5122 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5123 void G1CollectedHeap::verify_dirty_young_regions() { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5124 verify_dirty_young_list(_young_list->first_region()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5125 verify_dirty_young_list(_young_list->first_survivor_region()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5126 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5127 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5128 |
342 | 5129 void G1CollectedHeap::cleanUpCardTable() { |
5130 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
5131 double start = os::elapsedTime(); | |
5132 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5133 // Iterate over the dirty cards region list. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5134 G1ParCleanupCTTask cleanup_task(ct_bs, this, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5135 _young_list->first_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5136 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5137 if (ParallelGCThreads > 0) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5138 set_par_threads(workers()->total_workers()); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5139 workers()->run_task(&cleanup_task); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5140 set_par_threads(0); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5141 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5142 while (_dirty_cards_region_list) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5143 HeapRegion* r = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5144 cleanup_task.clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5145 _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5146 if (_dirty_cards_region_list == r) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5147 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5148 _dirty_cards_region_list = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5149 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5150 r->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5151 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5152 // now, redirty the cards of the survivor regions |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5153 // (it seemed faster to do it this way, instead of iterating over |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5154 // all regions and then clearing / dirtying as appropriate) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5155 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5156 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5157 |
342 | 5158 double elapsed = os::elapsedTime() - start; |
5159 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5160 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5161 if (G1VerifyCTCleanup || VerifyAfterGC) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5162 G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs); |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5163 heap_region_iterate(&cleanup_verifier); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5164 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5165 #endif |
342 | 5166 } |
5167 | |
5168 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
2152 | 5169 size_t pre_used = 0; |
5170 FreeRegionList local_free_list("Local List for CSet Freeing"); | |
5171 | |
342 | 5172 double young_time_ms = 0.0; |
5173 double non_young_time_ms = 0.0; | |
5174 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5175 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5176 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5177 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5178 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5179 |
342 | 5180 G1CollectorPolicy* policy = g1_policy(); |
5181 | |
5182 double start_sec = os::elapsedTime(); | |
5183 bool non_young = true; | |
5184 | |
5185 HeapRegion* cur = cs_head; | |
5186 int age_bound = -1; | |
5187 size_t rs_lengths = 0; | |
5188 | |
5189 while (cur != NULL) { | |
2361 | 5190 assert(!is_on_master_free_list(cur), "sanity"); |
2152 | 5191 |
342 | 5192 if (non_young) { |
5193 if (cur->is_young()) { | |
5194 double end_sec = os::elapsedTime(); | |
5195 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5196 non_young_time_ms += elapsed_ms; | |
5197 | |
5198 start_sec = os::elapsedTime(); | |
5199 non_young = false; | |
5200 } | |
5201 } else { | |
2152 | 5202 double end_sec = os::elapsedTime(); |
5203 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5204 young_time_ms += elapsed_ms; | |
5205 | |
5206 start_sec = os::elapsedTime(); | |
5207 non_young = true; | |
342 | 5208 } |
5209 | |
5210 rs_lengths += cur->rem_set()->occupied(); | |
5211 | |
5212 HeapRegion* next = cur->next_in_collection_set(); | |
5213 assert(cur->in_collection_set(), "bad CS"); | |
5214 cur->set_next_in_collection_set(NULL); | |
5215 cur->set_in_collection_set(false); | |
5216 | |
5217 if (cur->is_young()) { | |
5218 int index = cur->young_index_in_cset(); | |
5219 guarantee( index != -1, "invariant" ); | |
5220 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
5221 size_t words_survived = _surviving_young_words[index]; | |
5222 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5223 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5224 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5225 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5226 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5227 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5228 cur->set_next_young_region(NULL); |
342 | 5229 } else { |
5230 int index = cur->young_index_in_cset(); | |
5231 guarantee( index == -1, "invariant" ); | |
5232 } | |
5233 | |
5234 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
5235 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
5236 "invariant" ); | |
5237 | |
5238 if (!cur->evacuation_failed()) { | |
5239 // And the region is empty. | |
2152 | 5240 assert(!cur->is_empty(), "Should not have empty regions in a CS."); |
5241 free_region(cur, &pre_used, &local_free_list, false /* par */); | |
342 | 5242 } else { |
5243 cur->uninstall_surv_rate_group(); | |
5244 if (cur->is_young()) | |
5245 cur->set_young_index_in_cset(-1); | |
5246 cur->set_not_young(); | |
5247 cur->set_evacuation_failed(false); | |
5248 } | |
5249 cur = next; | |
5250 } | |
5251 | |
5252 policy->record_max_rs_lengths(rs_lengths); | |
5253 policy->cset_regions_freed(); | |
5254 | |
5255 double end_sec = os::elapsedTime(); | |
5256 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5257 if (non_young) | |
5258 non_young_time_ms += elapsed_ms; | |
5259 else | |
5260 young_time_ms += elapsed_ms; | |
5261 | |
2152 | 5262 update_sets_after_freeing_regions(pre_used, &local_free_list, |
5263 NULL /* humongous_proxy_set */, | |
5264 false /* par */); | |
342 | 5265 policy->record_young_free_cset_time_ms(young_time_ms); |
5266 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
5267 } | |
5268 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5269 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5270 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5271 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5272 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5273 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5274 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5275 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5276 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5277 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5278 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5279 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5280 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5281 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5282 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5283 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5284 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5285 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5286 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5287 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5288 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5289 |
2152 | 5290 void G1CollectedHeap::set_free_regions_coming() { |
5291 if (G1ConcRegionFreeingVerbose) { | |
5292 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : " | |
5293 "setting free regions coming"); | |
5294 } | |
5295 | |
5296 assert(!free_regions_coming(), "pre-condition"); | |
5297 _free_regions_coming = true; | |
342 | 5298 } |
5299 | |
2152 | 5300 void G1CollectedHeap::reset_free_regions_coming() { |
5301 { | |
5302 assert(free_regions_coming(), "pre-condition"); | |
5303 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | |
5304 _free_regions_coming = false; | |
5305 SecondaryFreeList_lock->notify_all(); | |
5306 } | |
5307 | |
5308 if (G1ConcRegionFreeingVerbose) { | |
5309 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : " | |
5310 "reset free regions coming"); | |
342 | 5311 } |
5312 } | |
5313 | |
2152 | 5314 void G1CollectedHeap::wait_while_free_regions_coming() { |
5315 // Most of the time we won't have to wait, so let's do a quick test | |
5316 // first before we take the lock. | |
5317 if (!free_regions_coming()) { | |
5318 return; | |
5319 } | |
5320 | |
5321 if (G1ConcRegionFreeingVerbose) { | |
5322 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " | |
5323 "waiting for free regions"); | |
342 | 5324 } |
5325 | |
5326 { | |
2152 | 5327 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
5328 while (free_regions_coming()) { | |
5329 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); | |
342 | 5330 } |
2152 | 5331 } |
5332 | |
5333 if (G1ConcRegionFreeingVerbose) { | |
5334 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " | |
5335 "done waiting for free regions"); | |
5336 } | |
342 | 5337 } |
5338 | |
5339 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
5340 assert(heap_lock_held_for_gc(), | |
5341 "the heap lock should already be held by or for this thread"); | |
5342 _young_list->push_region(hr); | |
5343 g1_policy()->set_region_short_lived(hr); | |
5344 } | |
5345 | |
5346 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5347 private: | |
5348 bool _success; | |
5349 public: | |
5350 NoYoungRegionsClosure() : _success(true) { } | |
5351 bool doHeapRegion(HeapRegion* r) { | |
5352 if (r->is_young()) { | |
5353 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5354 r->bottom(), r->end()); | |
5355 _success = false; | |
5356 } | |
5357 return false; | |
5358 } | |
5359 bool success() { return _success; } | |
5360 }; | |
5361 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5362 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5363 bool ret = _young_list->check_list_empty(check_sample); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5364 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5365 if (check_heap) { |
342 | 5366 NoYoungRegionsClosure closure; |
5367 heap_region_iterate(&closure); | |
5368 ret = ret && closure.success(); | |
5369 } | |
5370 | |
5371 return ret; | |
5372 } | |
5373 | |
5374 void G1CollectedHeap::empty_young_list() { | |
5375 assert(heap_lock_held_for_gc(), | |
5376 "the heap lock should already be held by or for this thread"); | |
5377 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
5378 | |
5379 _young_list->empty_list(); | |
5380 } | |
5381 | |
5382 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5383 bool no_allocs = true; | |
5384 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5385 HeapRegion* r = _gc_alloc_regions[ap]; | |
5386 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5387 } | |
5388 return no_allocs; | |
5389 } | |
5390 | |
545 | 5391 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5392 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5393 HeapRegion* r = _gc_alloc_regions[ap]; | |
5394 if (r != NULL) { | |
5395 // Check for aliases. | |
5396 bool has_processed_alias = false; | |
5397 for (int i = 0; i < ap; ++i) { | |
5398 if (_gc_alloc_regions[i] == r) { | |
5399 has_processed_alias = true; | |
5400 break; | |
5401 } | |
5402 } | |
5403 if (!has_processed_alias) { | |
545 | 5404 retire_alloc_region(r, false /* par */); |
342 | 5405 } |
5406 } | |
5407 } | |
5408 } | |
5409 | |
5410 // Done at the start of full GC. | |
5411 void G1CollectedHeap::tear_down_region_lists() { | |
2152 | 5412 _free_list.remove_all(); |
342 | 5413 } |
5414 | |
5415 class RegionResetter: public HeapRegionClosure { | |
2152 | 5416 G1CollectedHeap* _g1h; |
5417 FreeRegionList _local_free_list; | |
5418 | |
342 | 5419 public: |
2152 | 5420 RegionResetter() : _g1h(G1CollectedHeap::heap()), |
5421 _local_free_list("Local Free List for RegionResetter") { } | |
5422 | |
342 | 5423 bool doHeapRegion(HeapRegion* r) { |
5424 if (r->continuesHumongous()) return false; | |
5425 if (r->top() > r->bottom()) { | |
5426 if (r->top() < r->end()) { | |
5427 Copy::fill_to_words(r->top(), | |
5428 pointer_delta(r->end(), r->top())); | |
5429 } | |
5430 } else { | |
5431 assert(r->is_empty(), "tautology"); | |
2152 | 5432 _local_free_list.add_as_tail(r); |
342 | 5433 } |
5434 return false; | |
5435 } | |
5436 | |
2152 | 5437 void update_free_lists() { |
5438 _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL, | |
5439 false /* par */); | |
5440 } | |
342 | 5441 }; |
5442 | |
5443 // Done at the end of full GC. | |
5444 void G1CollectedHeap::rebuild_region_lists() { | |
5445 // This needs to go at the end of the full GC. | |
5446 RegionResetter rs; | |
5447 heap_region_iterate(&rs); | |
2152 | 5448 rs.update_free_lists(); |
342 | 5449 } |
5450 | |
5451 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
5452 _refine_cte_cl->set_concurrent(concurrent); | |
5453 } | |
5454 | |
5455 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5456 HeapRegion* hr = heap_region_containing(p); | |
5457 if (hr == NULL) { | |
5458 return is_in_permanent(p); | |
5459 } else { | |
5460 return hr->is_in(p); | |
5461 } | |
5462 } | |
2152 | 5463 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5464 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5465 bool force) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5466 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5467 assert(!force || g1_policy()->can_expand_young_list(), |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5468 "if force is true we should be able to expand the young list"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5469 if (force || !g1_policy()->is_young_list_full()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5470 HeapRegion* new_alloc_region = new_region(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5471 false /* do_expand */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5472 if (new_alloc_region != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5473 g1_policy()->update_region_num(true /* next_is_young */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5474 set_region_short_lived_locked(new_alloc_region); |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
5475 g1mm()->update_eden_counters(); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5476 return new_alloc_region; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5477 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5478 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5479 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5480 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5481 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5482 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5483 size_t allocated_bytes) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5484 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5485 assert(alloc_region->is_young(), "all mutator alloc regions should be young"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5486 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5487 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5488 _summary_bytes_used += allocated_bytes; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5489 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5490 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5491 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5492 bool force) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5493 return _g1h->new_mutator_alloc_region(word_size, force); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5494 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5495 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5496 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5497 size_t allocated_bytes) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5498 _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5499 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5500 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5501 // Heap region set verification |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5502 |
2152 | 5503 class VerifyRegionListsClosure : public HeapRegionClosure { |
5504 private: | |
5505 HumongousRegionSet* _humongous_set; | |
5506 FreeRegionList* _free_list; | |
5507 size_t _region_count; | |
5508 | |
5509 public: | |
5510 VerifyRegionListsClosure(HumongousRegionSet* humongous_set, | |
5511 FreeRegionList* free_list) : | |
5512 _humongous_set(humongous_set), _free_list(free_list), | |
5513 _region_count(0) { } | |
5514 | |
5515 size_t region_count() { return _region_count; } | |
5516 | |
5517 bool doHeapRegion(HeapRegion* hr) { | |
5518 _region_count += 1; | |
5519 | |
5520 if (hr->continuesHumongous()) { | |
5521 return false; | |
5522 } | |
5523 | |
5524 if (hr->is_young()) { | |
5525 // TODO | |
5526 } else if (hr->startsHumongous()) { | |
5527 _humongous_set->verify_next_region(hr); | |
5528 } else if (hr->is_empty()) { | |
5529 _free_list->verify_next_region(hr); | |
5530 } | |
5531 return false; | |
5532 } | |
5533 }; | |
5534 | |
3766 | 5535 HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index, |
5536 HeapWord* bottom) { | |
5537 HeapWord* end = bottom + HeapRegion::GrainWords; | |
5538 MemRegion mr(bottom, end); | |
5539 assert(_g1_reserved.contains(mr), "invariant"); | |
5540 // This might return NULL if the allocation fails | |
5541 return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */); | |
5542 } | |
5543 | |
2152 | 5544 void G1CollectedHeap::verify_region_sets() { |
5545 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); | |
5546 | |
5547 // First, check the explicit lists. | |
5548 _free_list.verify(); | |
5549 { | |
5550 // Given that a concurrent operation might be adding regions to | |
5551 // the secondary free list we have to take the lock before | |
5552 // verifying it. | |
5553 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | |
5554 _secondary_free_list.verify(); | |
5555 } | |
5556 _humongous_set.verify(); | |
5557 | |
5558 // If a concurrent region freeing operation is in progress it will | |
5559 // be difficult to correctly attributed any free regions we come | |
5560 // across to the correct free list given that they might belong to | |
5561 // one of several (free_list, secondary_free_list, any local lists, | |
5562 // etc.). So, if that's the case we will skip the rest of the | |
5563 // verification operation. Alternatively, waiting for the concurrent | |
5564 // operation to complete will have a non-trivial effect on the GC's | |
5565 // operation (no concurrent operation will last longer than the | |
5566 // interval between two calls to verification) and it might hide | |
5567 // any issues that we would like to catch during testing. | |
5568 if (free_regions_coming()) { | |
5569 return; | |
5570 } | |
5571 | |
2361 | 5572 // Make sure we append the secondary_free_list on the free_list so |
5573 // that all free regions we will come across can be safely | |
5574 // attributed to the free_list. | |
5575 append_secondary_free_list_if_not_empty_with_lock(); | |
2152 | 5576 |
5577 // Finally, make sure that the region accounting in the lists is | |
5578 // consistent with what we see in the heap. | |
5579 _humongous_set.verify_start(); | |
5580 _free_list.verify_start(); | |
5581 | |
5582 VerifyRegionListsClosure cl(&_humongous_set, &_free_list); | |
5583 heap_region_iterate(&cl); | |
5584 | |
5585 _humongous_set.verify_end(); | |
5586 _free_list.verify_end(); | |
342 | 5587 } |