Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 3378:69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
Summary: During an evacuation pause clear the region fields of any concurrent marking task whose local finger points into the collection set as the values in the region fields will become stale. Clearing these fields causes the concurrent mark task to claim a new region when marking restarts after the pause.
Reviewed-by: tonyp, iveresov
author | johnc |
---|---|
date | Tue, 17 May 2011 00:56:01 -0700 |
parents | 2aa9ddbb9e60 |
children | 053d84a76d3d |
rev | line source |
---|---|
342 | 1 /* |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "code/icBuffer.hpp" | |
27 #include "gc_implementation/g1/bufferingOopClosure.hpp" | |
28 #include "gc_implementation/g1/concurrentG1Refine.hpp" | |
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" | |
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" |
1972 | 32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | |
34 #include "gc_implementation/g1/g1MarkSweep.hpp" | |
35 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | |
36 #include "gc_implementation/g1/g1RemSet.inline.hpp" | |
37 #include "gc_implementation/g1/heapRegionRemSet.hpp" | |
38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | |
39 #include "gc_implementation/g1/vm_operations_g1.hpp" | |
40 #include "gc_implementation/shared/isGCActiveMark.hpp" | |
41 #include "memory/gcLocker.inline.hpp" | |
42 #include "memory/genOopClosures.inline.hpp" | |
43 #include "memory/generationSpec.hpp" | |
44 #include "oops/oop.inline.hpp" | |
45 #include "oops/oop.pcgc.inline.hpp" | |
46 #include "runtime/aprofiler.hpp" | |
47 #include "runtime/vmThread.hpp" | |
342 | 48 |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
49 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
50 |
342 | 51 // turn it on so that the contents of the young list (scan-only / |
52 // to-be-collected) are printed at "strategic" points before / during | |
53 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
54 #define YOUNG_LIST_VERBOSE 0 |
342 | 55 // CURRENT STATUS |
56 // This file is under construction. Search for "FIXME". | |
57 | |
58 // INVARIANTS/NOTES | |
59 // | |
60 // All allocation activity covered by the G1CollectedHeap interface is | |
1973 | 61 // serialized by acquiring the HeapLock. This happens in mem_allocate |
62 // and allocate_new_tlab, which are the "entry" points to the | |
63 // allocation code from the rest of the JVM. (Note that this does not | |
64 // apply to TLAB allocation, which is not part of this interface: it | |
65 // is done by clients of this interface.) | |
342 | 66 |
67 // Local to this file. | |
68 | |
69 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
70 SuspendibleThreadSet* _sts; | |
71 G1RemSet* _g1rs; | |
72 ConcurrentG1Refine* _cg1r; | |
73 bool _concurrent; | |
74 public: | |
75 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
76 G1RemSet* g1rs, | |
77 ConcurrentG1Refine* cg1r) : | |
78 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
79 {} | |
80 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
1705 | 81 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); |
82 // This path is executed by the concurrent refine or mutator threads, | |
83 // concurrently, and so we do not care if card_ptr contains references | |
84 // that point into the collection set. | |
85 assert(!oops_into_cset, "should be"); | |
86 | |
342 | 87 if (_concurrent && _sts->should_yield()) { |
88 // Caller will actually yield. | |
89 return false; | |
90 } | |
91 // Otherwise, we finished successfully; return true. | |
92 return true; | |
93 } | |
94 void set_concurrent(bool b) { _concurrent = b; } | |
95 }; | |
96 | |
97 | |
98 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
99 int _calls; | |
100 G1CollectedHeap* _g1h; | |
101 CardTableModRefBS* _ctbs; | |
102 int _histo[256]; | |
103 public: | |
104 ClearLoggedCardTableEntryClosure() : | |
105 _calls(0) | |
106 { | |
107 _g1h = G1CollectedHeap::heap(); | |
108 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
109 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
110 } | |
111 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
112 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
113 _calls++; | |
114 unsigned char* ujb = (unsigned char*)card_ptr; | |
115 int ind = (int)(*ujb); | |
116 _histo[ind]++; | |
117 *card_ptr = -1; | |
118 } | |
119 return true; | |
120 } | |
121 int calls() { return _calls; } | |
122 void print_histo() { | |
123 gclog_or_tty->print_cr("Card table value histogram:"); | |
124 for (int i = 0; i < 256; i++) { | |
125 if (_histo[i] != 0) { | |
126 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
127 } | |
128 } | |
129 } | |
130 }; | |
131 | |
132 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
133 int _calls; | |
134 G1CollectedHeap* _g1h; | |
135 CardTableModRefBS* _ctbs; | |
136 public: | |
137 RedirtyLoggedCardTableEntryClosure() : | |
138 _calls(0) | |
139 { | |
140 _g1h = G1CollectedHeap::heap(); | |
141 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
142 } | |
143 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
144 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
145 _calls++; | |
146 *card_ptr = 0; | |
147 } | |
148 return true; | |
149 } | |
150 int calls() { return _calls; } | |
151 }; | |
152 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
153 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
154 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
155 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
156 *card_ptr = CardTableModRefBS::dirty_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
157 return true; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
158 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
159 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
160 |
342 | 161 YoungList::YoungList(G1CollectedHeap* g1h) |
162 : _g1h(g1h), _head(NULL), | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
163 _length(0), |
342 | 164 _last_sampled_rs_lengths(0), |
545 | 165 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
342 | 166 { |
167 guarantee( check_list_empty(false), "just making sure..." ); | |
168 } | |
169 | |
170 void YoungList::push_region(HeapRegion *hr) { | |
171 assert(!hr->is_young(), "should not already be young"); | |
172 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
173 | |
174 hr->set_next_young_region(_head); | |
175 _head = hr; | |
176 | |
177 hr->set_young(); | |
178 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
179 ++_length; | |
180 } | |
181 | |
182 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 183 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 184 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
185 | |
186 hr->set_next_young_region(_survivor_head); | |
187 if (_survivor_head == NULL) { | |
545 | 188 _survivor_tail = hr; |
342 | 189 } |
190 _survivor_head = hr; | |
191 | |
192 ++_survivor_length; | |
193 } | |
194 | |
195 void YoungList::empty_list(HeapRegion* list) { | |
196 while (list != NULL) { | |
197 HeapRegion* next = list->get_next_young_region(); | |
198 list->set_next_young_region(NULL); | |
199 list->uninstall_surv_rate_group(); | |
200 list->set_not_young(); | |
201 list = next; | |
202 } | |
203 } | |
204 | |
205 void YoungList::empty_list() { | |
206 assert(check_list_well_formed(), "young list should be well formed"); | |
207 | |
208 empty_list(_head); | |
209 _head = NULL; | |
210 _length = 0; | |
211 | |
212 empty_list(_survivor_head); | |
213 _survivor_head = NULL; | |
545 | 214 _survivor_tail = NULL; |
342 | 215 _survivor_length = 0; |
216 | |
217 _last_sampled_rs_lengths = 0; | |
218 | |
219 assert(check_list_empty(false), "just making sure..."); | |
220 } | |
221 | |
222 bool YoungList::check_list_well_formed() { | |
223 bool ret = true; | |
224 | |
225 size_t length = 0; | |
226 HeapRegion* curr = _head; | |
227 HeapRegion* last = NULL; | |
228 while (curr != NULL) { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
229 if (!curr->is_young()) { |
342 | 230 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
231 "incorrectly tagged (y: %d, surv: %d)", |
342 | 232 curr->bottom(), curr->end(), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
233 curr->is_young(), curr->is_survivor()); |
342 | 234 ret = false; |
235 } | |
236 ++length; | |
237 last = curr; | |
238 curr = curr->get_next_young_region(); | |
239 } | |
240 ret = ret && (length == _length); | |
241 | |
242 if (!ret) { | |
243 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
244 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
245 length, _length); | |
246 } | |
247 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
248 return ret; |
342 | 249 } |
250 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
251 bool YoungList::check_list_empty(bool check_sample) { |
342 | 252 bool ret = true; |
253 | |
254 if (_length != 0) { | |
255 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
256 _length); | |
257 ret = false; | |
258 } | |
259 if (check_sample && _last_sampled_rs_lengths != 0) { | |
260 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
261 ret = false; | |
262 } | |
263 if (_head != NULL) { | |
264 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
265 ret = false; | |
266 } | |
267 if (!ret) { | |
268 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
269 } | |
270 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
271 return ret; |
342 | 272 } |
273 | |
274 void | |
275 YoungList::rs_length_sampling_init() { | |
276 _sampled_rs_lengths = 0; | |
277 _curr = _head; | |
278 } | |
279 | |
280 bool | |
281 YoungList::rs_length_sampling_more() { | |
282 return _curr != NULL; | |
283 } | |
284 | |
285 void | |
286 YoungList::rs_length_sampling_next() { | |
287 assert( _curr != NULL, "invariant" ); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
288 size_t rs_length = _curr->rem_set()->occupied(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
289 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
290 _sampled_rs_lengths += rs_length; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
291 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
292 // The current region may not yet have been added to the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
293 // incremental collection set (it gets added when it is |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
294 // retired as the current allocation region). |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
295 if (_curr->in_collection_set()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
296 // Update the collection set policy information for this region |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
297 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
298 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
299 |
342 | 300 _curr = _curr->get_next_young_region(); |
301 if (_curr == NULL) { | |
302 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
303 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
304 } | |
305 } | |
306 | |
307 void | |
308 YoungList::reset_auxilary_lists() { | |
309 guarantee( is_empty(), "young list should be empty" ); | |
310 assert(check_list_well_formed(), "young list should be well formed"); | |
311 | |
312 // Add survivor regions to SurvRateGroup. | |
313 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
545 | 314 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
315 |
342 | 316 for (HeapRegion* curr = _survivor_head; |
317 curr != NULL; | |
318 curr = curr->get_next_young_region()) { | |
319 _g1h->g1_policy()->set_region_survivors(curr); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
320 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
321 // The region is a non-empty survivor so let's add it to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
322 // the incremental collection set for the next evacuation |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
323 // pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
324 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); |
342 | 325 } |
326 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
327 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
328 _head = _survivor_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
329 _length = _survivor_length; |
342 | 330 if (_survivor_head != NULL) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
331 assert(_survivor_tail != NULL, "cause it shouldn't be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
332 assert(_survivor_length > 0, "invariant"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
333 _survivor_tail->set_next_young_region(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
334 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
335 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
336 // Don't clear the survivor list handles until the start of |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
337 // the next evacuation pause - we need it in order to re-tag |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
338 // the survivor regions from this evacuation pause as 'young' |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
339 // at the start of the next. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
340 |
545 | 341 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
342 | 342 |
343 assert(check_list_well_formed(), "young list should be well formed"); | |
344 } | |
345 | |
346 void YoungList::print() { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
347 HeapRegion* lists[] = {_head, _survivor_head}; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
348 const char* names[] = {"YOUNG", "SURVIVOR"}; |
342 | 349 |
350 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
351 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
352 HeapRegion *curr = lists[list]; | |
353 if (curr == NULL) | |
354 gclog_or_tty->print_cr(" empty"); | |
355 while (curr != NULL) { | |
356 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
357 "age: %4d, y: %d, surv: %d", |
342 | 358 curr->bottom(), curr->end(), |
359 curr->top(), | |
360 curr->prev_top_at_mark_start(), | |
361 curr->next_top_at_mark_start(), | |
362 curr->top_at_conc_mark_count(), | |
363 curr->age_in_surv_rate_group_cond(), | |
364 curr->is_young(), | |
365 curr->is_survivor()); | |
366 curr = curr->get_next_young_region(); | |
367 } | |
368 } | |
369 | |
370 gclog_or_tty->print_cr(""); | |
371 } | |
372 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
373 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
374 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
375 // Claim the right to put the region on the dirty cards region list |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
376 // by installing a self pointer. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
377 HeapRegion* next = hr->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
378 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
379 HeapRegion* res = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
380 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
381 NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
382 if (res == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
383 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
384 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
385 // Put the region to the dirty cards region list. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
386 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
387 next = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
388 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
389 if (next == head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
390 assert(hr->get_next_dirty_cards_region() == hr, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
391 "hr->get_next_dirty_cards_region() != hr"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
392 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
393 // The last region in the list points to itself. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
394 hr->set_next_dirty_cards_region(hr); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
395 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
396 hr->set_next_dirty_cards_region(next); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
397 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
398 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
399 } while (next != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
400 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
401 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
402 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
403 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
404 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
405 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
406 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
407 HeapRegion* hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
408 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
409 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
410 if (head == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
411 return NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
412 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
413 HeapRegion* new_head = head->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
414 if (head == new_head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
415 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
416 new_head = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
417 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
418 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
419 head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
420 } while (hr != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
421 assert(hr != NULL, "invariant"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
422 hr->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
423 return hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
424 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
425 |
342 | 426 void G1CollectedHeap::stop_conc_gc_threads() { |
794 | 427 _cg1r->stop(); |
342 | 428 _cmThread->stop(); |
429 } | |
430 | |
3377
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
431 #ifdef ASSERT |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
432 // A region is added to the collection set as it is retired |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
433 // so an address p can point to a region which will be in the |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
434 // collection set but has not yet been retired. This method |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
435 // therefore is only accurate during a GC pause after all |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
436 // regions have been retired. It is used for debugging |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
437 // to check if an nmethod has references to objects that can |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
438 // be move during a partial collection. Though it can be |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
439 // inaccurate, it is sufficient for G1 because the conservative |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
440 // implementation of is_scavengable() for G1 will indicate that |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
441 // all nmethods must be scanned during a partial collection. |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
442 bool G1CollectedHeap::is_in_partial_collection(const void* p) { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
443 HeapRegion* hr = heap_region_containing(p); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
444 return hr != NULL && hr->in_collection_set(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
445 } |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
446 #endif |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
447 |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
448 // Returns true if the reference points to an object that |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
449 // can move in an incremental collecction. |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
450 bool G1CollectedHeap::is_scavengable(const void* p) { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
451 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
452 G1CollectorPolicy* g1p = g1h->g1_policy(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
453 HeapRegion* hr = heap_region_containing(p); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
454 if (hr == NULL) { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
455 // perm gen (or null) |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
456 return false; |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
457 } else { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
458 return !hr->isHumongous(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
459 } |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
460 } |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
461 |
342 | 462 void G1CollectedHeap::check_ct_logs_at_safepoint() { |
463 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
464 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
465 | |
466 // Count the dirty cards at the start. | |
467 CountNonCleanMemRegionClosure count1(this); | |
468 ct_bs->mod_card_iterate(&count1); | |
469 int orig_count = count1.n(); | |
470 | |
471 // First clear the logged cards. | |
472 ClearLoggedCardTableEntryClosure clear; | |
473 dcqs.set_closure(&clear); | |
474 dcqs.apply_closure_to_all_completed_buffers(); | |
475 dcqs.iterate_closure_all_threads(false); | |
476 clear.print_histo(); | |
477 | |
478 // Now ensure that there's no dirty cards. | |
479 CountNonCleanMemRegionClosure count2(this); | |
480 ct_bs->mod_card_iterate(&count2); | |
481 if (count2.n() != 0) { | |
482 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
483 count2.n(), orig_count); | |
484 } | |
485 guarantee(count2.n() == 0, "Card table should be clean."); | |
486 | |
487 RedirtyLoggedCardTableEntryClosure redirty; | |
488 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
489 dcqs.apply_closure_to_all_completed_buffers(); | |
490 dcqs.iterate_closure_all_threads(false); | |
491 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
492 clear.calls(), orig_count); | |
493 guarantee(redirty.calls() == clear.calls(), | |
494 "Or else mechanism is broken."); | |
495 | |
496 CountNonCleanMemRegionClosure count3(this); | |
497 ct_bs->mod_card_iterate(&count3); | |
498 if (count3.n() != orig_count) { | |
499 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
500 orig_count, count3.n()); | |
501 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
502 } | |
503 | |
504 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
505 } | |
506 | |
507 // Private class members. | |
508 | |
509 G1CollectedHeap* G1CollectedHeap::_g1h; | |
510 | |
511 // Private methods. | |
512 | |
2152 | 513 HeapRegion* |
2361 | 514 G1CollectedHeap::new_region_try_secondary_free_list() { |
2152 | 515 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
516 while (!_secondary_free_list.is_empty() || free_regions_coming()) { | |
517 if (!_secondary_free_list.is_empty()) { | |
518 if (G1ConcRegionFreeingVerbose) { | |
519 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
520 "secondary_free_list has "SIZE_FORMAT" entries", | |
521 _secondary_free_list.length()); | |
522 } | |
523 // It looks as if there are free regions available on the | |
524 // secondary_free_list. Let's move them to the free_list and try | |
525 // again to allocate from it. | |
526 append_secondary_free_list(); | |
527 | |
528 assert(!_free_list.is_empty(), "if the secondary_free_list was not " | |
529 "empty we should have moved at least one entry to the free_list"); | |
530 HeapRegion* res = _free_list.remove_head(); | |
531 if (G1ConcRegionFreeingVerbose) { | |
532 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
533 "allocated "HR_FORMAT" from secondary_free_list", | |
534 HR_FORMAT_PARAMS(res)); | |
535 } | |
536 return res; | |
537 } | |
538 | |
539 // Wait here until we get notifed either when (a) there are no | |
540 // more free regions coming or (b) some regions have been moved on | |
541 // the secondary_free_list. | |
542 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); | |
543 } | |
544 | |
545 if (G1ConcRegionFreeingVerbose) { | |
546 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
547 "could not allocate from secondary_free_list"); | |
548 } | |
549 return NULL; | |
550 } | |
551 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
552 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) { |
2152 | 553 assert(!isHumongous(word_size) || |
554 word_size <= (size_t) HeapRegion::GrainWords, | |
555 "the only time we use this to allocate a humongous region is " | |
556 "when we are allocating a single humongous region"); | |
557 | |
558 HeapRegion* res; | |
559 if (G1StressConcRegionFreeing) { | |
560 if (!_secondary_free_list.is_empty()) { | |
561 if (G1ConcRegionFreeingVerbose) { | |
562 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
563 "forced to look at the secondary_free_list"); | |
564 } | |
2361 | 565 res = new_region_try_secondary_free_list(); |
2152 | 566 if (res != NULL) { |
567 return res; | |
568 } | |
569 } | |
570 } | |
571 res = _free_list.remove_head_or_null(); | |
572 if (res == NULL) { | |
573 if (G1ConcRegionFreeingVerbose) { | |
574 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
575 "res == NULL, trying the secondary_free_list"); | |
576 } | |
2361 | 577 res = new_region_try_secondary_free_list(); |
2152 | 578 } |
342 | 579 if (res == NULL && do_expand) { |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
580 if (expand(word_size * HeapWordSize)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
581 // The expansion succeeded and so we should have at least one |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
582 // region on the free list. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
583 res = _free_list.remove_head(); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
584 } |
342 | 585 } |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
586 if (res != NULL) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
587 if (G1PrintHeapRegions) { |
2152 | 588 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], " |
589 "top "PTR_FORMAT, res->hrs_index(), | |
590 res->bottom(), res->end(), res->top()); | |
342 | 591 } |
592 } | |
593 return res; | |
594 } | |
595 | |
2152 | 596 HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose, |
597 size_t word_size) { | |
342 | 598 HeapRegion* alloc_region = NULL; |
599 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
600 alloc_region = new_region(word_size, true /* do_expand */); |
342 | 601 if (purpose == GCAllocForSurvived && alloc_region != NULL) { |
545 | 602 alloc_region->set_survivor(); |
342 | 603 } |
604 ++_gc_alloc_region_counts[purpose]; | |
605 } else { | |
606 g1_policy()->note_alloc_region_limit_reached(purpose); | |
607 } | |
608 return alloc_region; | |
609 } | |
610 | |
2152 | 611 int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, |
612 size_t word_size) { | |
2361 | 613 assert(isHumongous(word_size), "word_size should be humongous"); |
614 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); | |
615 | |
2152 | 616 int first = -1; |
617 if (num_regions == 1) { | |
618 // Only one region to allocate, no need to go through the slower | |
619 // path. The caller will attempt the expasion if this fails, so | |
620 // let's not try to expand here too. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
621 HeapRegion* hr = new_region(word_size, false /* do_expand */); |
2152 | 622 if (hr != NULL) { |
623 first = hr->hrs_index(); | |
624 } else { | |
625 first = -1; | |
626 } | |
627 } else { | |
628 // We can't allocate humongous regions while cleanupComplete() is | |
629 // running, since some of the regions we find to be empty might not | |
630 // yet be added to the free list and it is not straightforward to | |
631 // know which list they are on so that we can remove them. Note | |
632 // that we only need to do this if we need to allocate more than | |
633 // one region to satisfy the current humongous allocation | |
634 // request. If we are only allocating one region we use the common | |
635 // region allocation code (see above). | |
636 wait_while_free_regions_coming(); | |
2361 | 637 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 638 |
639 if (free_regions() >= num_regions) { | |
640 first = _hrs->find_contiguous(num_regions); | |
641 if (first != -1) { | |
642 for (int i = first; i < first + (int) num_regions; ++i) { | |
643 HeapRegion* hr = _hrs->at(i); | |
644 assert(hr->is_empty(), "sanity"); | |
2361 | 645 assert(is_on_master_free_list(hr), "sanity"); |
2152 | 646 hr->set_pending_removal(true); |
647 } | |
648 _free_list.remove_all_pending(num_regions); | |
649 } | |
650 } | |
651 } | |
652 return first; | |
653 } | |
654 | |
2361 | 655 HeapWord* |
656 G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first, | |
657 size_t num_regions, | |
658 size_t word_size) { | |
659 assert(first != -1, "pre-condition"); | |
660 assert(isHumongous(word_size), "word_size should be humongous"); | |
661 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); | |
662 | |
663 // Index of last region in the series + 1. | |
664 int last = first + (int) num_regions; | |
665 | |
666 // We need to initialize the region(s) we just discovered. This is | |
667 // a bit tricky given that it can happen concurrently with | |
668 // refinement threads refining cards on these regions and | |
669 // potentially wanting to refine the BOT as they are scanning | |
670 // those cards (this can happen shortly after a cleanup; see CR | |
671 // 6991377). So we have to set up the region(s) carefully and in | |
672 // a specific order. | |
673 | |
674 // The word size sum of all the regions we will allocate. | |
675 size_t word_size_sum = num_regions * HeapRegion::GrainWords; | |
676 assert(word_size <= word_size_sum, "sanity"); | |
677 | |
678 // This will be the "starts humongous" region. | |
679 HeapRegion* first_hr = _hrs->at(first); | |
680 // The header of the new object will be placed at the bottom of | |
681 // the first region. | |
682 HeapWord* new_obj = first_hr->bottom(); | |
683 // This will be the new end of the first region in the series that | |
684 // should also match the end of the last region in the seriers. | |
685 HeapWord* new_end = new_obj + word_size_sum; | |
686 // This will be the new top of the first region that will reflect | |
687 // this allocation. | |
688 HeapWord* new_top = new_obj + word_size; | |
689 | |
690 // First, we need to zero the header of the space that we will be | |
691 // allocating. When we update top further down, some refinement | |
692 // threads might try to scan the region. By zeroing the header we | |
693 // ensure that any thread that will try to scan the region will | |
694 // come across the zero klass word and bail out. | |
695 // | |
696 // NOTE: It would not have been correct to have used | |
697 // CollectedHeap::fill_with_object() and make the space look like | |
698 // an int array. The thread that is doing the allocation will | |
699 // later update the object header to a potentially different array | |
700 // type and, for a very short period of time, the klass and length | |
701 // fields will be inconsistent. This could cause a refinement | |
702 // thread to calculate the object size incorrectly. | |
703 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); | |
704 | |
705 // We will set up the first region as "starts humongous". This | |
706 // will also update the BOT covering all the regions to reflect | |
707 // that there is a single object that starts at the bottom of the | |
708 // first region. | |
709 first_hr->set_startsHumongous(new_top, new_end); | |
710 | |
711 // Then, if there are any, we will set up the "continues | |
712 // humongous" regions. | |
713 HeapRegion* hr = NULL; | |
714 for (int i = first + 1; i < last; ++i) { | |
715 hr = _hrs->at(i); | |
716 hr->set_continuesHumongous(first_hr); | |
717 } | |
718 // If we have "continues humongous" regions (hr != NULL), then the | |
719 // end of the last one should match new_end. | |
720 assert(hr == NULL || hr->end() == new_end, "sanity"); | |
721 | |
722 // Up to this point no concurrent thread would have been able to | |
723 // do any scanning on any region in this series. All the top | |
724 // fields still point to bottom, so the intersection between | |
725 // [bottom,top] and [card_start,card_end] will be empty. Before we | |
726 // update the top fields, we'll do a storestore to make sure that | |
727 // no thread sees the update to top before the zeroing of the | |
728 // object header and the BOT initialization. | |
729 OrderAccess::storestore(); | |
730 | |
731 // Now that the BOT and the object header have been initialized, | |
732 // we can update top of the "starts humongous" region. | |
733 assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), | |
734 "new_top should be in this region"); | |
735 first_hr->set_top(new_top); | |
736 | |
737 // Now, we will update the top fields of the "continues humongous" | |
738 // regions. The reason we need to do this is that, otherwise, | |
739 // these regions would look empty and this will confuse parts of | |
740 // G1. For example, the code that looks for a consecutive number | |
741 // of empty regions will consider them empty and try to | |
742 // re-allocate them. We can extend is_empty() to also include | |
743 // !continuesHumongous(), but it is easier to just update the top | |
744 // fields here. The way we set top for all regions (i.e., top == | |
745 // end for all regions but the last one, top == new_top for the | |
746 // last one) is actually used when we will free up the humongous | |
747 // region in free_humongous_region(). | |
748 hr = NULL; | |
749 for (int i = first + 1; i < last; ++i) { | |
750 hr = _hrs->at(i); | |
751 if ((i + 1) == last) { | |
752 // last continues humongous region | |
753 assert(hr->bottom() < new_top && new_top <= hr->end(), | |
754 "new_top should fall on this region"); | |
755 hr->set_top(new_top); | |
756 } else { | |
757 // not last one | |
758 assert(new_top > hr->end(), "new_top should be above this region"); | |
759 hr->set_top(hr->end()); | |
760 } | |
761 } | |
762 // If we have continues humongous regions (hr != NULL), then the | |
763 // end of the last one should match new_end and its top should | |
764 // match new_top. | |
765 assert(hr == NULL || | |
766 (hr->end() == new_end && hr->top() == new_top), "sanity"); | |
767 | |
768 assert(first_hr->used() == word_size * HeapWordSize, "invariant"); | |
769 _summary_bytes_used += first_hr->used(); | |
770 _humongous_set.add(first_hr); | |
771 | |
772 return new_obj; | |
773 } | |
774 | |
342 | 775 // If could fit into free regions w/o expansion, try. |
776 // Otherwise, if can expand, do so. | |
777 // Otherwise, if using ex regions might help, try with ex given back. | |
1973 | 778 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { |
2152 | 779 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
780 | |
781 verify_region_sets_optional(); | |
342 | 782 |
783 size_t num_regions = | |
1973 | 784 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; |
342 | 785 size_t x_size = expansion_regions(); |
2152 | 786 size_t fs = _hrs->free_suffix(); |
787 int first = humongous_obj_allocate_find_first(num_regions, word_size); | |
788 if (first == -1) { | |
789 // The only thing we can do now is attempt expansion. | |
342 | 790 if (fs + x_size >= num_regions) { |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
791 // If the number of regions we're trying to allocate for this |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
792 // object is at most the number of regions in the free suffix, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
793 // then the call to humongous_obj_allocate_find_first() above |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
794 // should have succeeded and we wouldn't be here. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
795 // |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
796 // We should only be trying to expand when the free suffix is |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
797 // not sufficient for the object _and_ we have some expansion |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
798 // room available. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
799 assert(num_regions > fs, "earlier allocation should have succeeded"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
800 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
801 if (expand((num_regions - fs) * HeapRegion::GrainBytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
802 first = humongous_obj_allocate_find_first(num_regions, word_size); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
803 // If the expansion was successful then the allocation |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
804 // should have been successful. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
805 assert(first != -1, "this should have worked"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
806 } |
2152 | 807 } |
808 } | |
809 | |
2361 | 810 HeapWord* result = NULL; |
2152 | 811 if (first != -1) { |
2361 | 812 result = |
813 humongous_obj_allocate_initialize_regions(first, num_regions, word_size); | |
814 assert(result != NULL, "it should always return a valid result"); | |
2152 | 815 } |
816 | |
817 verify_region_sets_optional(); | |
2361 | 818 |
819 return result; | |
342 | 820 } |
821 | |
1973 | 822 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { |
823 assert_heap_not_locked_and_not_at_safepoint(); | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
824 assert(!isHumongous(word_size), "we do not allow humongous TLABs"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
825 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
826 unsigned int dummy_gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
827 return attempt_allocation(word_size, &dummy_gc_count_before); |
342 | 828 } |
829 | |
830 HeapWord* | |
831 G1CollectedHeap::mem_allocate(size_t word_size, | |
832 bool is_noref, | |
833 bool is_tlab, | |
1973 | 834 bool* gc_overhead_limit_was_exceeded) { |
835 assert_heap_not_locked_and_not_at_safepoint(); | |
836 assert(!is_tlab, "mem_allocate() this should not be called directly " | |
837 "to allocate TLABs"); | |
342 | 838 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
839 // Loop until the allocation is satisified, or unsatisfied after GC. |
1973 | 840 for (int try_count = 1; /* we'll return */; try_count += 1) { |
841 unsigned int gc_count_before; | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
842 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
843 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
844 if (!isHumongous(word_size)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
845 result = attempt_allocation(word_size, &gc_count_before); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
846 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
847 result = attempt_allocation_humongous(word_size, &gc_count_before); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
848 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
849 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
850 return result; |
342 | 851 } |
852 | |
853 // Create the garbage collection operation... | |
1973 | 854 VM_G1CollectForAllocation op(gc_count_before, word_size); |
342 | 855 // ...and get the VM thread to execute it. |
856 VMThread::execute(&op); | |
1973 | 857 |
858 if (op.prologue_succeeded() && op.pause_succeeded()) { | |
859 // If the operation was successful we'll return the result even | |
860 // if it is NULL. If the allocation attempt failed immediately | |
861 // after a Full GC, it's unlikely we'll be able to allocate now. | |
862 HeapWord* result = op.result(); | |
863 if (result != NULL && !isHumongous(word_size)) { | |
864 // Allocations that take place on VM operations do not do any | |
865 // card dirtying and we have to do it here. We only have to do | |
866 // this for non-humongous allocations, though. | |
867 dirty_young_block(result, word_size); | |
868 } | |
342 | 869 return result; |
1973 | 870 } else { |
871 assert(op.result() == NULL, | |
872 "the result should be NULL if the VM op did not succeed"); | |
342 | 873 } |
874 | |
875 // Give a warning if we seem to be looping forever. | |
876 if ((QueuedAllocationWarningCount > 0) && | |
877 (try_count % QueuedAllocationWarningCount == 0)) { | |
1973 | 878 warning("G1CollectedHeap::mem_allocate retries %d times", try_count); |
342 | 879 } |
880 } | |
1973 | 881 |
882 ShouldNotReachHere(); | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
883 return NULL; |
342 | 884 } |
885 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
886 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
887 unsigned int *gc_count_before_ret) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
888 // Make sure you read the note in attempt_allocation_humongous(). |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
889 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
890 assert_heap_not_locked_and_not_at_safepoint(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
891 assert(!isHumongous(word_size), "attempt_allocation_slow() should not " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
892 "be called for humongous allocation requests"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
893 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
894 // We should only get here after the first-level allocation attempt |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
895 // (attempt_allocation()) failed to allocate. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
896 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
897 // We will loop until a) we manage to successfully perform the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
898 // allocation or b) we successfully schedule a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
899 // fails to perform the allocation. b) is the only case when we'll |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
900 // return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
901 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
902 for (int try_count = 1; /* we'll return */; try_count += 1) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
903 bool should_try_gc; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
904 unsigned int gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
905 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
906 { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
907 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
908 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
909 result = _mutator_alloc_region.attempt_allocation_locked(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
910 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
911 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
912 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
913 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
914 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
915 // If we reach here, attempt_allocation_locked() above failed to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
916 // allocate a new region. So the mutator alloc region should be NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
917 assert(_mutator_alloc_region.get() == NULL, "only way to get here"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
918 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
919 if (GC_locker::is_active_and_needs_gc()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
920 if (g1_policy()->can_expand_young_list()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
921 result = _mutator_alloc_region.attempt_allocation_force(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
922 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
923 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
924 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
925 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
926 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
927 should_try_gc = false; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
928 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
929 // Read the GC count while still holding the Heap_lock. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
930 gc_count_before = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
931 should_try_gc = true; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
932 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
933 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
934 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
935 if (should_try_gc) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
936 bool succeeded; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
937 result = do_collection_pause(word_size, gc_count_before, &succeeded); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
938 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
939 assert(succeeded, "only way to get back a non-NULL result"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
940 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
941 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
942 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
943 if (succeeded) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
944 // If we get here we successfully scheduled a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
945 // failed to allocate. No point in trying to allocate |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
946 // further. We'll just return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
947 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
948 *gc_count_before_ret = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
949 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
950 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
951 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
952 GC_locker::stall_until_clear(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
953 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
954 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
955 // We can reach here if we were unsuccessul in scheduling a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
956 // collection (because another thread beat us to it) or if we were |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
957 // stalled due to the GC locker. In either can we should retry the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
958 // allocation attempt in case another thread successfully |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
959 // performed a collection and reclaimed enough space. We do the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
960 // first attempt (without holding the Heap_lock) here and the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
961 // follow-on attempt will be at the start of the next loop |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
962 // iteration (after taking the Heap_lock). |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
963 result = _mutator_alloc_region.attempt_allocation(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
964 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
965 if (result != NULL ){ |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
966 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
967 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
968 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
969 // Give a warning if we seem to be looping forever. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
970 if ((QueuedAllocationWarningCount > 0) && |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
971 (try_count % QueuedAllocationWarningCount == 0)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
972 warning("G1CollectedHeap::attempt_allocation_slow() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
973 "retries %d times", try_count); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
974 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
975 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
976 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
977 ShouldNotReachHere(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
978 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
979 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
980 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
981 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
982 unsigned int * gc_count_before_ret) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
983 // The structure of this method has a lot of similarities to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
984 // attempt_allocation_slow(). The reason these two were not merged |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
985 // into a single one is that such a method would require several "if |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
986 // allocation is not humongous do this, otherwise do that" |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
987 // conditional paths which would obscure its flow. In fact, an early |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
988 // version of this code did use a unified method which was harder to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
989 // follow and, as a result, it had subtle bugs that were hard to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
990 // track down. So keeping these two methods separate allows each to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
991 // be more readable. It will be good to keep these two in sync as |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
992 // much as possible. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
993 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
994 assert_heap_not_locked_and_not_at_safepoint(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
995 assert(isHumongous(word_size), "attempt_allocation_humongous() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
996 "should only be called for humongous allocations"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
997 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
998 // We will loop until a) we manage to successfully perform the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
999 // allocation or b) we successfully schedule a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1000 // fails to perform the allocation. b) is the only case when we'll |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1001 // return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1002 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1003 for (int try_count = 1; /* we'll return */; try_count += 1) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1004 bool should_try_gc; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1005 unsigned int gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1006 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1007 { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1008 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1009 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1010 // Given that humongous objects are not allocated in young |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1011 // regions, we'll first try to do the allocation without doing a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1012 // collection hoping that there's enough space in the heap. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1013 result = humongous_obj_allocate(word_size); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1014 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1015 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1016 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1017 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1018 if (GC_locker::is_active_and_needs_gc()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1019 should_try_gc = false; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1020 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1021 // Read the GC count while still holding the Heap_lock. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1022 gc_count_before = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1023 should_try_gc = true; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1024 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1025 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1026 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1027 if (should_try_gc) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1028 // If we failed to allocate the humongous object, we should try to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1029 // do a collection pause (if we're allowed) in case it reclaims |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1030 // enough space for the allocation to succeed after the pause. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1031 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1032 bool succeeded; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1033 result = do_collection_pause(word_size, gc_count_before, &succeeded); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1034 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1035 assert(succeeded, "only way to get back a non-NULL result"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1036 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1037 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1038 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1039 if (succeeded) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1040 // If we get here we successfully scheduled a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1041 // failed to allocate. No point in trying to allocate |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1042 // further. We'll just return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1043 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1044 *gc_count_before_ret = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1045 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1046 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1047 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1048 GC_locker::stall_until_clear(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1049 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1050 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1051 // We can reach here if we were unsuccessul in scheduling a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1052 // collection (because another thread beat us to it) or if we were |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1053 // stalled due to the GC locker. In either can we should retry the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1054 // allocation attempt in case another thread successfully |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1055 // performed a collection and reclaimed enough space. Give a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1056 // warning if we seem to be looping forever. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1057 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1058 if ((QueuedAllocationWarningCount > 0) && |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1059 (try_count % QueuedAllocationWarningCount == 0)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1060 warning("G1CollectedHeap::attempt_allocation_humongous() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1061 "retries %d times", try_count); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1062 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1063 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1064 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1065 ShouldNotReachHere(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1066 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1067 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1068 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1069 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1070 bool expect_null_mutator_alloc_region) { |
2152 | 1071 assert_at_safepoint(true /* should_be_vm_thread */); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1072 assert(_mutator_alloc_region.get() == NULL || |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1073 !expect_null_mutator_alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1074 "the current alloc region was unexpectedly found to be non-NULL"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1075 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1076 if (!isHumongous(word_size)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1077 return _mutator_alloc_region.attempt_allocation_locked(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1078 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1079 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1080 return humongous_obj_allocate(word_size); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1081 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1082 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1083 ShouldNotReachHere(); |
342 | 1084 } |
1085 | |
636 | 1086 void G1CollectedHeap::abandon_gc_alloc_regions() { |
1087 // first, make sure that the GC alloc region list is empty (it should!) | |
1088 assert(_gc_alloc_region_list == NULL, "invariant"); | |
1089 release_gc_alloc_regions(true /* totally */); | |
1090 } | |
1091 | |
342 | 1092 class PostMCRemSetClearClosure: public HeapRegionClosure { |
1093 ModRefBarrierSet* _mr_bs; | |
1094 public: | |
1095 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1096 bool doHeapRegion(HeapRegion* r) { | |
1097 r->reset_gc_time_stamp(); | |
1098 if (r->continuesHumongous()) | |
1099 return false; | |
1100 HeapRegionRemSet* hrrs = r->rem_set(); | |
1101 if (hrrs != NULL) hrrs->clear(); | |
1102 // You might think here that we could clear just the cards | |
1103 // corresponding to the used region. But no: if we leave a dirty card | |
1104 // in a region we might allocate into, then it would prevent that card | |
1105 // from being enqueued, and cause it to be missed. | |
1106 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
1107 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
1108 return false; | |
1109 } | |
1110 }; | |
1111 | |
1112 | |
1113 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
1114 ModRefBarrierSet* _mr_bs; | |
1115 public: | |
1116 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1117 bool doHeapRegion(HeapRegion* r) { | |
1118 if (r->continuesHumongous()) return false; | |
1119 if (r->used_region().word_size() != 0) { | |
1120 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
1121 } | |
1122 return false; | |
1123 } | |
1124 }; | |
1125 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1126 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1127 G1CollectedHeap* _g1h; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1128 UpdateRSOopClosure _cl; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1129 int _worker_i; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1130 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1131 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
1861 | 1132 _cl(g1->g1_rem_set(), worker_i), |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1133 _worker_i(worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1134 _g1h(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1135 { } |
1960
878b57474103
6978187: G1: assert(ParallelGCThreads> 1 || n_yielded() == _hrrs->occupied()) strikes again
johnc
parents:
1883
diff
changeset
|
1136 |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1137 bool doHeapRegion(HeapRegion* r) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1138 if (!r->continuesHumongous()) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1139 _cl.set_from(r); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1140 r->oop_iterate(&_cl); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1141 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1142 return false; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1143 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1144 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1145 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1146 class ParRebuildRSTask: public AbstractGangTask { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1147 G1CollectedHeap* _g1; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1148 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1149 ParRebuildRSTask(G1CollectedHeap* g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1150 : AbstractGangTask("ParRebuildRSTask"), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1151 _g1(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1152 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1153 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1154 void work(int i) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1155 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1156 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1157 HeapRegion::RebuildRSClaimValue); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1158 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1159 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1160 |
1973 | 1161 bool G1CollectedHeap::do_collection(bool explicit_gc, |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1162 bool clear_all_soft_refs, |
342 | 1163 size_t word_size) { |
2152 | 1164 assert_at_safepoint(true /* should_be_vm_thread */); |
1165 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1166 if (GC_locker::check_active_before_gc()) { |
1973 | 1167 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1168 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1169 |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2039
diff
changeset
|
1170 SvcGCMarker sgcm(SvcGCMarker::FULL); |
342 | 1171 ResourceMark rm; |
1172 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1173 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1174 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1175 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1176 |
2152 | 1177 verify_region_sets_optional(); |
342 | 1178 |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1179 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1180 collector_policy()->should_clear_all_soft_refs(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1181 |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1182 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1183 |
342 | 1184 { |
1185 IsGCActiveMark x; | |
1186 | |
1187 // Timing | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1188 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1189 assert(!system_gc || explicit_gc, "invariant"); |
342 | 1190 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
1191 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1192 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1193 PrintGC, true, gclog_or_tty); |
342 | 1194 |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
1195 TraceCollectorStats tcs(g1mm()->full_collection_counters()); |
3356
78542e2b5e35
7036199: Adding a notification to the implementation of GarbageCollectorMXBeans
fparain
parents:
3323
diff
changeset
|
1196 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1197 |
342 | 1198 double start = os::elapsedTime(); |
1199 g1_policy()->record_full_collection_start(); | |
1200 | |
2152 | 1201 wait_while_free_regions_coming(); |
2361 | 1202 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 1203 |
342 | 1204 gc_prologue(true); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1205 increment_total_collections(true /* full gc */); |
342 | 1206 |
1207 size_t g1h_prev_used = used(); | |
1208 assert(used() == recalculate_used(), "Should be equal"); | |
1209 | |
1210 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
1211 HandleMark hm; // Discard invalid handles created during verification | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1212 gclog_or_tty->print(" VerifyBeforeGC:"); |
342 | 1213 prepare_for_verify(); |
1214 Universe::verify(true); | |
1215 } | |
1216 | |
1217 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
1218 | |
1219 // We want to discover references, but not process them yet. | |
1220 // This mode is disabled in | |
1221 // instanceRefKlass::process_discovered_references if the | |
1222 // generation does some collection work, or | |
1223 // instanceRefKlass::enqueue_discovered_references if the | |
1224 // generation returns without doing any work. | |
1225 ref_processor()->disable_discovery(); | |
1226 ref_processor()->abandon_partial_discovery(); | |
1227 ref_processor()->verify_no_references_recorded(); | |
1228 | |
1229 // Abandon current iterations of concurrent marking and concurrent | |
1230 // refinement, if any are in progress. | |
1231 concurrent_mark()->abort(); | |
1232 | |
1233 // Make sure we'll choose a new allocation region afterwards. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1234 release_mutator_alloc_region(); |
636 | 1235 abandon_gc_alloc_regions(); |
1861 | 1236 g1_rem_set()->cleanupHRRS(); |
342 | 1237 tear_down_region_lists(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1238 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1239 // We may have added regions to the current incremental collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1240 // set between the last GC or pause and now. We need to clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1241 // incremental collection set and then start rebuilding it afresh |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1242 // after this full GC. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1243 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1244 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1245 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1246 |
342 | 1247 if (g1_policy()->in_young_gc_mode()) { |
1248 empty_young_list(); | |
1249 g1_policy()->set_full_young_gcs(true); | |
1250 } | |
1251 | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1252 // See the comment in G1CollectedHeap::ref_processing_init() about |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1253 // how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1254 |
342 | 1255 // Temporarily make reference _discovery_ single threaded (non-MT). |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
1256 ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false); |
342 | 1257 |
1258 // Temporarily make refs discovery atomic | |
1259 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
1260 | |
1261 // Temporarily clear _is_alive_non_header | |
1262 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
1263 | |
1264 ref_processor()->enable_discovery(); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1265 ref_processor()->setup_policy(do_clear_all_soft_refs); |
342 | 1266 |
1267 // Do collection work | |
1268 { | |
1269 HandleMark hm; // Discard invalid handles created during gc | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1270 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); |
342 | 1271 } |
2152 | 1272 assert(free_regions() == 0, "we should not have added any free regions"); |
342 | 1273 rebuild_region_lists(); |
1274 | |
1275 _summary_bytes_used = recalculate_used(); | |
1276 | |
1277 ref_processor()->enqueue_discovered_references(); | |
1278 | |
1279 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
1280 | |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1281 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1282 |
342 | 1283 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
1284 HandleMark hm; // Discard invalid handles created during verification | |
1285 gclog_or_tty->print(" VerifyAfterGC:"); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
1286 prepare_for_verify(); |
342 | 1287 Universe::verify(false); |
1288 } | |
1289 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
1290 | |
1291 reset_gc_time_stamp(); | |
1292 // Since everything potentially moved, we will clear all remembered | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1293 // sets, and clear all cards. Later we will rebuild remebered |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1294 // sets. We will also reset the GC time stamps of the regions. |
342 | 1295 PostMCRemSetClearClosure rs_clear(mr_bs()); |
1296 heap_region_iterate(&rs_clear); | |
1297 | |
1298 // Resize the heap if necessary. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1299 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); |
342 | 1300 |
1301 if (_cg1r->use_cache()) { | |
1302 _cg1r->clear_and_record_card_counts(); | |
1303 _cg1r->clear_hot_cache(); | |
1304 } | |
1305 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1306 // Rebuild remembered sets of all regions. |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1307 |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1308 if (G1CollectedHeap::use_parallel_gc_threads()) { |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1309 ParRebuildRSTask rebuild_rs_task(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1310 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1311 HeapRegion::InitialClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1312 set_par_threads(workers()->total_workers()); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1313 workers()->run_task(&rebuild_rs_task); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1314 set_par_threads(0); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1315 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1316 HeapRegion::RebuildRSClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1317 reset_heap_region_claim_values(); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1318 } else { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1319 RebuildRSOutOfRegionClosure rebuild_rs(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1320 heap_region_iterate(&rebuild_rs); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1321 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1322 |
342 | 1323 if (PrintGC) { |
1324 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
1325 } | |
1326 | |
1327 if (true) { // FIXME | |
1328 // Ask the permanent generation to adjust size for full collections | |
1329 perm()->compute_new_size(); | |
1330 } | |
1331 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1332 // Start a new incremental collection set for the next pause |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1333 assert(g1_policy()->collection_set() == NULL, "must be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1334 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1335 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1336 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1337 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1338 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1339 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1340 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1341 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1342 |
342 | 1343 double end = os::elapsedTime(); |
1344 g1_policy()->record_full_collection_end(); | |
1345 | |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1346 #ifdef TRACESPINNING |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1347 ParallelTaskTerminator::print_termination_counts(); |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1348 #endif |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1349 |
342 | 1350 gc_epilogue(true); |
1351 | |
794 | 1352 // Discard all rset updates |
1353 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1354 assert(!G1DeferredRSUpdate |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1355 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
342 | 1356 } |
1357 | |
1358 if (g1_policy()->in_young_gc_mode()) { | |
1359 _young_list->reset_sampled_info(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1360 // At this point there should be no regions in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1361 // entire heap tagged as young. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1362 assert( check_young_list_empty(true /* check_heap */), |
342 | 1363 "young list should be empty at this point"); |
1364 } | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1365 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1366 // Update the number of full collections that have been completed. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
1367 increment_full_collections_completed(false /* concurrent */); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1368 |
2152 | 1369 verify_region_sets_optional(); |
1370 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1371 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1372 Universe::print_heap_after_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1373 } |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
1374 g1mm()->update_counters(); |
1973 | 1375 |
1376 return true; | |
342 | 1377 } |
1378 | |
1379 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1973 | 1380 // do_collection() will return whether it succeeded in performing |
1381 // the GC. Currently, there is no facility on the | |
1382 // do_full_collection() API to notify the caller than the collection | |
1383 // did not succeed (e.g., because it was locked out by the GC | |
1384 // locker). So, right now, we'll ignore the return value. | |
1385 bool dummy = do_collection(true, /* explicit_gc */ | |
1386 clear_all_soft_refs, | |
1387 0 /* word_size */); | |
342 | 1388 } |
1389 | |
1390 // This code is mostly copied from TenuredGeneration. | |
1391 void | |
1392 G1CollectedHeap:: | |
1393 resize_if_necessary_after_full_collection(size_t word_size) { | |
1394 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1395 | |
1396 // Include the current allocation, if any, and bytes that will be | |
1397 // pre-allocated to support collections, as "used". | |
1398 const size_t used_after_gc = used(); | |
1399 const size_t capacity_after_gc = capacity(); | |
1400 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1401 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1402 // This is enforced in arguments.cpp. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1403 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1404 "otherwise the code below doesn't make sense"); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1405 |
342 | 1406 // We don't have floating point command-line arguments |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1407 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; |
342 | 1408 const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1409 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; |
342 | 1410 const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
1411 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1412 const size_t min_heap_size = collector_policy()->min_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1413 const size_t max_heap_size = collector_policy()->max_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1414 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1415 // We have to be careful here as these two calculations can overflow |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1416 // 32-bit size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1417 double used_after_gc_d = (double) used_after_gc; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1418 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1419 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1420 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1421 // Let's make sure that they are both under the max heap size, which |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1422 // by default will make them fit into a size_t. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1423 double desired_capacity_upper_bound = (double) max_heap_size; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1424 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1425 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1426 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1427 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1428 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1429 // We can now safely turn them into size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1430 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1431 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1432 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1433 // This assert only makes sense here, before we adjust them |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1434 // with respect to the min and max heap size. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1435 assert(minimum_desired_capacity <= maximum_desired_capacity, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1436 err_msg("minimum_desired_capacity = "SIZE_FORMAT", " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1437 "maximum_desired_capacity = "SIZE_FORMAT, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1438 minimum_desired_capacity, maximum_desired_capacity)); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1439 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1440 // Should not be greater than the heap max size. No need to adjust |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1441 // it with respect to the heap min size as it's a lower bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1442 // we'll try to make the capacity larger than it, not smaller). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1443 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1444 // Should not be less than the heap min size. No need to adjust it |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1445 // with respect to the heap max size as it's an upper bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1446 // we'll try to make the capacity smaller than it, not greater). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1447 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); |
342 | 1448 |
1449 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1450 const double free_percentage = |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1451 (double) free_after_gc / (double) capacity_after_gc; |
342 | 1452 gclog_or_tty->print_cr("Computing new size after full GC "); |
1453 gclog_or_tty->print_cr(" " | |
1454 " minimum_free_percentage: %6.2f", | |
1455 minimum_free_percentage); | |
1456 gclog_or_tty->print_cr(" " | |
1457 " maximum_free_percentage: %6.2f", | |
1458 maximum_free_percentage); | |
1459 gclog_or_tty->print_cr(" " | |
1460 " capacity: %6.1fK" | |
1461 " minimum_desired_capacity: %6.1fK" | |
1462 " maximum_desired_capacity: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1463 (double) capacity_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1464 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1465 (double) maximum_desired_capacity / (double) K); |
342 | 1466 gclog_or_tty->print_cr(" " |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1467 " free_after_gc: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1468 " used_after_gc: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1469 (double) free_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1470 (double) used_after_gc / (double) K); |
342 | 1471 gclog_or_tty->print_cr(" " |
1472 " free_percentage: %6.2f", | |
1473 free_percentage); | |
1474 } | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1475 if (capacity_after_gc < minimum_desired_capacity) { |
342 | 1476 // Don't expand unless it's significant |
1477 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1478 if (expand(expand_bytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1479 if (PrintGC && Verbose) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1480 gclog_or_tty->print_cr(" " |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1481 " expanding:" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1482 " max_heap_size: %6.1fK" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1483 " minimum_desired_capacity: %6.1fK" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1484 " expand_bytes: %6.1fK", |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1485 (double) max_heap_size / (double) K, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1486 (double) minimum_desired_capacity / (double) K, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1487 (double) expand_bytes / (double) K); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1488 } |
342 | 1489 } |
1490 | |
1491 // No expansion, now see if we want to shrink | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1492 } else if (capacity_after_gc > maximum_desired_capacity) { |
342 | 1493 // Capacity too large, compute shrinking size |
1494 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1495 shrink(shrink_bytes); | |
1496 if (PrintGC && Verbose) { | |
1497 gclog_or_tty->print_cr(" " | |
1498 " shrinking:" | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1499 " min_heap_size: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1500 " maximum_desired_capacity: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1501 " shrink_bytes: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1502 (double) min_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1503 (double) maximum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1504 (double) shrink_bytes / (double) K); |
342 | 1505 } |
1506 } | |
1507 } | |
1508 | |
1509 | |
// Last-ditch allocation path, executed at a safepoint by the VM thread
// after a normal allocation attempt has failed. It escalates through:
//   (1) a plain allocation retry,
//   (2) heap expansion followed by an allocation attempt,
//   (3) a full GC (keeping soft references) and a retry,
//   (4) a full GC that clears all soft references and a final retry.
// Returns the allocated block, or NULL. *succeeded is set to false only
// when one of the required full GCs could not be performed; otherwise it
// stays true even if the allocation itself ultimately fails.
HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
                                           bool* succeeded) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  *succeeded = true;
  // Let's attempt the allocation first.
  HeapWord* result =
    attempt_allocation_at_safepoint(word_size,
                                    false /* expect_null_mutator_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses.  Therefore, at least for now, we'll favor
  // expansion over collection.  (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)
  result = expand_and_allocate(word_size);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // Expansion didn't work, we'll try to do a Full GC.
  bool gc_succeeded = do_collection(false, /* explicit_gc */
                                    false, /* clear_all_soft_refs */
                                    word_size);
  if (!gc_succeeded) {
    // The full GC could not be run; report failure to the caller.
    *succeeded = false;
    return NULL;
  }

  // Retry the allocation.  The full GC should have torn down the
  // mutator alloc region, hence expect_null_mutator_alloc_region.
  result = attempt_allocation_at_safepoint(word_size,
                                           true /* expect_null_mutator_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // Then, try a Full GC that will collect all soft references.
  gc_succeeded = do_collection(false, /* explicit_gc */
                               true, /* clear_all_soft_refs */
                               word_size);
  if (!gc_succeeded) {
    *succeeded = false;
    return NULL;
  }

  // Retry the allocation once more
  result = attempt_allocation_at_safepoint(word_size,
                                           true /* expect_null_mutator_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // By this point the soft-ref-clearing GC has run, so the policy's
  // clear-all-soft-refs flag must already have been consumed.
  assert(!collector_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  assert(*succeeded, "sanity");
  return NULL;
}
1579 | |
1580 // Attempting to expand the heap sufficiently | |
1581 // to support an allocation of the given "word_size". If | |
1582 // successful, perform the allocation and return the address of the | |
1583 // allocated block, or else "NULL". | |
1584 | |
1585 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
2152 | 1586 assert_at_safepoint(true /* should_be_vm_thread */); |
1587 | |
1588 verify_region_sets_optional(); | |
1973 | 1589 |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1590 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1591 if (expand(expand_bytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1592 verify_region_sets_optional(); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1593 return attempt_allocation_at_safepoint(word_size, |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1594 false /* expect_null_mutator_alloc_region */); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1595 } |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1596 return NULL; |
342 | 1597 } |
1598 | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
// Grow the committed part of the heap by (at least) expand_bytes,
// rounded up to the page size and then to a whole number of heap
// regions.  On success, the card table and block-offset table are
// resized to cover the new committed range and a HeapRegion is created
// for each newly committed region and appended to the master free list.
// Returns whether the underlying virtual space expansion succeeded.
bool G1CollectedHeap::expand(size_t expand_bytes) {
  size_t old_mem_size = _g1_storage.committed_size();
  // Round the request up to page granularity, then to region granularity.
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                       HeapRegion::GrainBytes);

  if (Verbose && PrintGC) {
    // NOTE(review): "%ld" is not a portable format for size_t (wrong on
    // LLP64 targets) -- SIZE_FORMAT would be safer; confirm against the
    // file's other logging.
    gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK",
                           old_mem_size/K, aligned_expand_bytes/K);
  }

  HeapWord* old_end = (HeapWord*)_g1_storage.high();
  bool successful = _g1_storage.expand_by(aligned_expand_bytes);
  if (successful) {
    HeapWord* new_end = (HeapWord*)_g1_storage.high();

    // Expand the committed region.
    _g1_committed.set_end(new_end);

    // Tell the cardtable about the expansion.
    Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);

    // And the offset table as well.
    _bot_shared->resize(_g1_committed.word_size());

    expand_bytes = aligned_expand_bytes;
    HeapWord* base = old_end;

    // Create the heap regions for [old_end, new_end)
    while (expand_bytes > 0) {
      HeapWord* high = base + HeapRegion::GrainWords;

      // Create a new HeapRegion.  A region is "zeroed" only if it has
      // never been committed before (i.e. lies beyond _g1_max_committed).
      MemRegion mr(base, high);
      bool is_zeroed = !_g1_max_committed.contains(base);
      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);

      // Add it to the HeapRegionSeq.
      _hrs->insert(hr);
      _free_list.add_as_tail(hr);

      // And we used up an expansion region to create it.
      _expansion_regions--;

      expand_bytes -= HeapRegion::GrainBytes;
      base += HeapRegion::GrainWords;
    }
    assert(base == new_end, "sanity");

    // Now update max_committed if necessary.  It never shrinks, so a
    // later re-expansion can tell which regions were committed before.
    _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));

  } else {
    // The expansion of the virtual storage space was unsuccessful.
    // Let's see if it was because we ran out of swap.
    if (G1ExitOnExpansionFailure &&
        _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
      // We had head room...
      vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
    }
  }

  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("...%s, expanded to %ldK",
                           (successful ? "Successful" : "Failed"),
                           new_mem_size/K);
  }
  return successful;
}
1669 | |
// Uncommit (approximately) shrink_bytes from the top of the heap,
// rounded down to page and then region granularity.  HeapRegionSeq
// decides which regions are actually deleted; the virtual space,
// committed range, card table, block-offset table and remembered-set
// sizing are then updated to match.  Callers (see shrink()) must have
// torn down the region free lists beforehand.
void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
{
  size_t old_mem_size = _g1_storage.committed_size();
  size_t aligned_shrink_bytes =
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                         HeapRegion::GrainBytes);
  size_t num_regions_deleted = 0;
  // shrink_by() returns the range of regions removed from the top of
  // the heap and reports how many were deleted.
  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);

  // The removed range must sit exactly at the current top of storage.
  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  if (mr.byte_size() > 0)
    _g1_storage.shrink_by(mr.byte_size());
  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");

  _g1_committed.set_end(mr.start());
  // The deleted regions become available for future expansion again.
  _expansion_regions += num_regions_deleted;

  // Tell the cardtable about it.
  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);

  // And the offset table as well.
  _bot_shared->resize(_g1_committed.word_size());

  HeapRegionRemSet::shrink_heap(n_regions());

  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    // NOTE(review): "%ld" is not a portable format for size_t -- see the
    // matching note in expand(); SIZE_FORMAT would be safer.
    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_shrink_bytes/K,
                           new_mem_size/K);
  }
}
1703 | |
1704 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
2152 | 1705 verify_region_sets_optional(); |
1706 | |
636 | 1707 release_gc_alloc_regions(true /* totally */); |
2152 | 1708 // Instead of tearing down / rebuilding the free lists here, we |
1709 // could instead use the remove_all_pending() method on free_list to | |
1710 // remove only the ones that we need to remove. | |
342 | 1711 tear_down_region_lists(); // We will rebuild them in a moment. |
1712 shrink_helper(shrink_bytes); | |
1713 rebuild_region_lists(); | |
2152 | 1714 |
1715 verify_region_sets_optional(); | |
342 | 1716 } |
1717 | |
1718 // Public methods. | |
1719 | |
1720 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1721 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1722 #endif // _MSC_VER | |
1723 | |
1724 | |
// Construct the G1 heap object.  Only cheap, non-failing setup happens
// here (default-NULL/zero fields, per-thread task queues, remembered-set
// iterators, GC-alloc-region bookkeeping); the actual heap reservation
// and commitment is done later in initialize().
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  SharedHeap(policy_),
  _g1_policy(policy_),
  _dirty_card_queue_set(false),
  _into_cset_dirty_card_queue_set(false),
  _is_alive_closure(this),
  _ref_processor(NULL),
  _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  _bot_shared(NULL),
  _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  _evac_failure_scan_stack(NULL) ,
  _mark_in_progress(false),
  _cg1r(NULL), _summary_bytes_used(0),
  _refine_cte_cl(NULL),
  _full_collection(false),
  _free_list("Master Free List"),
  _secondary_free_list("Secondary Free List"),
  _humongous_set("Master Humongous Set"),
  _free_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
  _surviving_young_words(NULL),
  _full_collections_completed(0),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _dirty_cards_region_list(NULL) {
  _g1h = this; // To catch bugs.
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }

  // An object at least half a region in size is treated as humongous.
  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;

  // One reference-to-scan queue per parallel GC thread (at least one).
  int n_queues = MAX2((int)ParallelGCThreads, 1);
  _task_queues = new RefToScanQueueSet(n_queues);

  int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  // One remembered-set iterator per queue/worker.
  HeapRegionRemSetIterator** iter_arr =
    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  for (int i = 0; i < n_queues; i++) {
    iter_arr[i] = new HeapRegionRemSetIterator();
  }
  _rem_set_iterator = iter_arr;

  for (int i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
    _task_queues->register_queue(i, q);
  }

  // Reset the per-purpose GC alloc region bookkeeping.
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    _gc_alloc_regions[ap]          = NULL;
    _gc_alloc_region_counts[ap]    = 0;
    _retained_gc_alloc_regions[ap] = NULL;
    // by default, we do not retain a GC alloc region for each ap;
    // we'll override this, when appropriate, below
    _retain_gc_alloc_region[ap] = false;
  }

  // We will try to remember the last half-full tenured region we
  // allocated to at the end of a collection so that we can re-use it
  // during the next collection.
  _retain_gc_alloc_region[GCAllocForTenured] = true;

  guarantee(_task_queues != NULL, "task_queues allocation failure.");
}
1793 | |
1794 jint G1CollectedHeap::initialize() { | |
1166 | 1795 CollectedHeap::pre_initialize(); |
342 | 1796 os::enable_vtime(); |
1797 | |
1798 // Necessary to satisfy locking discipline assertions. | |
1799 | |
1800 MutexLocker x(Heap_lock); | |
1801 | |
1802 // While there are no constraints in the GC code that HeapWordSize | |
1803 // be any particular value, there are multiple other areas in the | |
1804 // system which believe this to be true (e.g. oop->object_size in some | |
1805 // cases incorrectly returns the size in wordSize units rather than | |
1806 // HeapWordSize). | |
1807 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1808 | |
1809 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1810 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1811 | |
1812 // Ensure that the sizes are properly aligned. | |
1813 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1814 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1815 | |
1816 _cg1r = new ConcurrentG1Refine(); | |
1817 | |
1818 // Reserve the maximum. | |
1819 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1820 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1821 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1822 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1823 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1824 |
342 | 1825 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1826 HeapRegion::GrainBytes, | |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1827 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1828 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1829 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1830 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1831 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1832 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1833 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1834 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1835 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1836 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1837 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1838 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1839 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1840 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1841 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1842 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1843 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1844 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1845 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1846 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1847 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1848 } |
342 | 1849 |
1850 if (!heap_rs.is_reserved()) { | |
1851 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1852 return JNI_ENOMEM; | |
1853 } | |
1854 | |
1855 // It is important to do this in a way such that concurrent readers can't | |
1856 // temporarily think somethings in the heap. (I've actually seen this | |
1857 // happen in asserts: DLD.) | |
1858 _reserved.set_word_size(0); | |
1859 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1860 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1861 | |
1862 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1863 | |
1864 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1865 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1866 set_barrier_set(rem_set()->bs()); | |
1867 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1868 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1869 } else { | |
1870 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1871 return JNI_ENOMEM; | |
1872 } | |
1873 | |
1874 // Also create a G1 rem set. | |
1861 | 1875 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
1876 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
342 | 1877 } else { |
1861 | 1878 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
1879 return JNI_ENOMEM; | |
342 | 1880 } |
1881 | |
1882 // Carve out the G1 part of the heap. | |
1883 | |
1884 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1885 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1886 g1_rs.size()/HeapWordSize); | |
1887 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1888 | |
1889 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1890 | |
1891 _g1_storage.initialize(g1_rs, 0); | |
1892 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1893 _g1_max_committed = _g1_committed; | |
393 | 1894 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1895 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1896 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1897 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1898 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1899 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1900 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1901 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1902 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1903 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1904 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1905 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1906 |
2152 | 1907 HeapRegionSet::set_unrealistically_long_length(max_regions() + 1); |
1908 | |
342 | 1909 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1910 heap_word_size(init_byte_size)); | |
1911 | |
1912 _g1h = this; | |
1913 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1914 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1915 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1916 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1917 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1918 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1919 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1920 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1921 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1922 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1923 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1924 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1925 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1926 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1927 |
342 | 1928 // Create the ConcurrentMark data structure and thread. |
1929 // (Must do this late, so that "max_regions" is defined.) | |
1930 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1931 _cmThread = _cm->cmThread(); | |
1932 | |
1933 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1934 HeapRegionRemSet::init_heap(max_regions()); | |
1935 | |
677 | 1936 // Now expand into the initial heap size. |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1937 if (!expand(init_byte_size)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1938 vm_exit_during_initialization("Failed to allocate initial heap."); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1939 return JNI_ENOMEM; |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1940 } |
342 | 1941 |
1942 // Perform any initialization actions delegated to the policy. | |
1943 g1_policy()->init(); | |
1944 | |
1945 g1_policy()->note_start_of_mark_thread(); | |
1946 | |
1947 _refine_cte_cl = | |
1948 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1949 g1_rem_set(), | |
1950 concurrent_g1_refine()); | |
1951 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1952 | |
1953 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1954 SATB_Q_FL_lock, | |
1111 | 1955 G1SATBProcessCompletedThreshold, |
342 | 1956 Shared_SATB_Q_lock); |
794 | 1957 |
1958 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1959 DirtyCardQ_FL_lock, | |
1111 | 1960 concurrent_g1_refine()->yellow_zone(), |
1961 concurrent_g1_refine()->red_zone(), | |
794 | 1962 Shared_DirtyCardQ_lock); |
1963 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1964 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1965 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1966 DirtyCardQ_FL_lock, |
1111 | 1967 -1, // never trigger processing |
1968 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1969 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1970 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1971 } |
1705 | 1972 |
1973 // Initialize the card queue set used to hold cards containing | |
1974 // references into the collection set. | |
1975 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, | |
1976 DirtyCardQ_FL_lock, | |
1977 -1, // never trigger processing | |
1978 -1, // no limit on length | |
1979 Shared_DirtyCardQ_lock, | |
1980 &JavaThread::dirty_card_queue_set()); | |
1981 | |
342 | 1982 // In case we're keeping closure specialization stats, initialize those |
1983 // counts and that mechanism. | |
1984 SpecializationStats::clear(); | |
1985 | |
1986 _gc_alloc_region_list = NULL; | |
1987 | |
1988 // Do later initialization work for concurrent refinement. | |
1989 _cg1r->init(); | |
1990 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1991 // Here we allocate the dummy full region that is required by the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1992 // G1AllocRegion class. If we don't pass an address in the reserved |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1993 // space here, lots of asserts fire. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1994 MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1995 HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1996 // We'll re-use the same region whether the alloc region will |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1997 // require BOT updates or not and, if it doesn't, then a non-young |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1998 // region will complain that it cannot support allocations without |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1999 // BOT updates. So we'll tag the dummy region as young to avoid that. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2000 dummy_region->set_young(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2001 // Make sure it's full. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2002 dummy_region->set_top(dummy_region->end()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2003 G1AllocRegion::setup(this, dummy_region); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2004 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2005 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2006 |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2007 // Do create of the monitoring and management support so that |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2008 // values in the heap have been properly initialized. |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2009 _g1mm = new G1MonitoringSupport(this, &_g1_storage); |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2010 |
342 | 2011 return JNI_OK; |
2012 } | |
2013 | |
2014 void G1CollectedHeap::ref_processing_init() { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2015 // Reference processing in G1 currently works as follows: |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2016 // |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2017 // * There is only one reference processor instance that |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2018 // 'spans' the entire heap. It is created by the code |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2019 // below. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2020 // * Reference discovery is not enabled during an incremental |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2021 // pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2022 // * Discovered refs are not enqueued nor are they processed |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2023 // during an incremental pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2024 // * Reference discovery is enabled at initial marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2025 // * Reference discovery is disabled and the discovered |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2026 // references processed etc during remarking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2027 // * Reference discovery is MT (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2028 // * Reference discovery requires a barrier (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2029 // * Reference processing is currently not MT (see 6608385). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2030 // * A full GC enables (non-MT) reference discovery and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2031 // processes any discovered references. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2032 |
342 | 2033 SharedHeap::ref_processing_init(); |
2034 MemRegion mr = reserved_region(); | |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2035 _ref_processor = |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2036 new ReferenceProcessor(mr, // span |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2037 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2038 (int) ParallelGCThreads, // degree of mt processing |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2039 ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2040 (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2041 false, // Reference discovery is not atomic |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2042 &_is_alive_closure, // is alive closure for efficiency |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2043 true); // Setting next fields of discovered |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2044 // lists requires a barrier. |
342 | 2045 } |
2046 | |
2047 size_t G1CollectedHeap::capacity() const { | |
2048 return _g1_committed.byte_size(); | |
2049 } | |
2050 | |
1705 | 2051 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, |
2052 DirtyCardQueue* into_cset_dcq, | |
2053 bool concurrent, | |
342 | 2054 int worker_i) { |
889 | 2055 // Clean cards in the hot card cache |
1705 | 2056 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); |
889 | 2057 |
342 | 2058 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
2059 int n_completed_buffers = 0; | |
1705 | 2060 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { |
342 | 2061 n_completed_buffers++; |
2062 } | |
2063 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
2064 (double) n_completed_buffers); | |
2065 dcqs.clear_n_completed_buffers(); | |
2066 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
2067 } | |
2068 | |
2069 | |
2070 // Computes the sum of the storage used by the various regions. | |
2071 | |
2072 size_t G1CollectedHeap::used() const { | |
862
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2073 assert(Heap_lock->owner() != NULL, |
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2074 "Should be owned on this thread's behalf."); |
342 | 2075 size_t result = _summary_bytes_used; |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2076 // Read only once in case it is set to NULL concurrently |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2077 HeapRegion* hr = _mutator_alloc_region.get(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2078 if (hr != NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2079 result += hr->used(); |
342 | 2080 return result; |
2081 } | |
2082 | |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2083 size_t G1CollectedHeap::used_unlocked() const { |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2084 size_t result = _summary_bytes_used; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2085 return result; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2086 } |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2087 |
342 | 2088 class SumUsedClosure: public HeapRegionClosure { |
2089 size_t _used; | |
2090 public: | |
2091 SumUsedClosure() : _used(0) {} | |
2092 bool doHeapRegion(HeapRegion* r) { | |
2093 if (!r->continuesHumongous()) { | |
2094 _used += r->used(); | |
2095 } | |
2096 return false; | |
2097 } | |
2098 size_t result() { return _used; } | |
2099 }; | |
2100 | |
2101 size_t G1CollectedHeap::recalculate_used() const { | |
2102 SumUsedClosure blk; | |
2103 _hrs->iterate(&blk); | |
2104 return blk.result(); | |
2105 } | |
2106 | |
2107 #ifndef PRODUCT | |
2108 class SumUsedRegionsClosure: public HeapRegionClosure { | |
2109 size_t _num; | |
2110 public: | |
677 | 2111 SumUsedRegionsClosure() : _num(0) {} |
342 | 2112 bool doHeapRegion(HeapRegion* r) { |
2113 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
2114 _num += 1; | |
2115 } | |
2116 return false; | |
2117 } | |
2118 size_t result() { return _num; } | |
2119 }; | |
2120 | |
2121 size_t G1CollectedHeap::recalculate_used_regions() const { | |
2122 SumUsedRegionsClosure blk; | |
2123 _hrs->iterate(&blk); | |
2124 return blk.result(); | |
2125 } | |
2126 #endif // PRODUCT | |
2127 | |
2128 size_t G1CollectedHeap::unsafe_max_alloc() { | |
2152 | 2129 if (free_regions() > 0) return HeapRegion::GrainBytes; |
342 | 2130 // otherwise, is there space in the current allocation region? |
2131 | |
2132 // We need to store the current allocation region in a local variable | |
2133 // here. The problem is that this method doesn't take any locks and | |
2134 // there may be other threads which overwrite the current allocation | |
2135 // region field. attempt_allocation(), for example, sets it to NULL | |
2136 // and this can happen *after* the NULL check here but before the call | |
2137 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
2138 // to be a problem in the optimized build, since the two loads of the | |
2139 // current allocation region field are optimized away. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2140 HeapRegion* hr = _mutator_alloc_region.get(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2141 if (hr == NULL) { |
342 | 2142 return 0; |
2143 } | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2144 return hr->free(); |
342 | 2145 } |
2146 | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2147 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2148 return |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2149 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2150 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2151 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2152 |
3285
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2153 #ifndef PRODUCT |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2154 void G1CollectedHeap::allocate_dummy_regions() { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2155 // Let's fill up most of the region |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2156 size_t word_size = HeapRegion::GrainWords - 1024; |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2157 // And as a result the region we'll allocate will be humongous. |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2158 guarantee(isHumongous(word_size), "sanity"); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2159 |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2160 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2161 // Let's use the existing mechanism for the allocation |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2162 HeapWord* dummy_obj = humongous_obj_allocate(word_size); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2163 if (dummy_obj != NULL) { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2164 MemRegion mr(dummy_obj, word_size); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2165 CollectedHeap::fill_with_object(mr); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2166 } else { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2167 // If we can't allocate once, we probably cannot allocate |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2168 // again. Let's get out of the loop. |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2169 break; |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2170 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2171 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2172 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2173 #endif // !PRODUCT |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2174 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2175 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) { |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2176 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2177 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2178 // We assume that if concurrent == true, then the caller is a |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2179 // concurrent thread that has joined the Suspendible Thread |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2180 // Set. If there's ever a cheap way to check this, we should add an |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2181 // assert here. |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2182 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2183 // We have already incremented _total_full_collections at the start |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2184 // of the GC, so total_full_collections() represents how many full |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2185 // collections have been started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2186 unsigned int full_collections_started = total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2187 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2188 // Given that this method is called at the end of a Full GC or of a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2189 // concurrent cycle, and those can be nested (i.e., a Full GC can |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2190 // interrupt a concurrent cycle), the number of full collections |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2191 // completed should be either one (in the case where there was no |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2192 // nesting) or two (when a Full GC interrupted a concurrent cycle) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2193 // behind the number of full collections started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2194 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2195 // This is the case for the inner caller, i.e. a Full GC. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2196 assert(concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2197 (full_collections_started == _full_collections_completed + 1) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2198 (full_collections_started == _full_collections_completed + 2), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2199 err_msg("for inner caller (Full GC): full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2200 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2201 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2202 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2203 // This is the case for the outer caller, i.e. the concurrent cycle. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2204 assert(!concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2205 (full_collections_started == _full_collections_completed + 1), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2206 err_msg("for outer caller (concurrent cycle): " |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2207 "full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2208 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2209 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2210 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2211 _full_collections_completed += 1; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2212 |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2213 // We need to clear the "in_progress" flag in the CM thread before |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2214 // we wake up any waiters (especially when ExplicitInvokesConcurrent |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2215 // is set) so that if a waiter requests another System.gc() it doesn't |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2216 // incorrectly see that a marking cyle is still in progress. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2217 if (concurrent) { |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2218 _cmThread->clear_in_progress(); |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2219 } |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2220 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2221 // This notify_all() will ensure that a thread that called |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2222 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2223 // and it's waiting for a full GC to finish will be woken up. It is |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2224 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2225 FullGCCount_lock->notify_all(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2226 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2227 |
342 | 2228 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
2152 | 2229 assert_at_safepoint(true /* should_be_vm_thread */); |
342 | 2230 GCCauseSetter gcs(this, cause); |
2231 switch (cause) { | |
2232 case GCCause::_heap_inspection: | |
2233 case GCCause::_heap_dump: { | |
2234 HandleMark hm; | |
2235 do_full_collection(false); // don't clear all soft refs | |
2236 break; | |
2237 } | |
2238 default: // XXX FIX ME | |
2239 ShouldNotReachHere(); // Unexpected use of this function | |
2240 } | |
2241 } | |
2242 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
// External entry point for requesting a collection. Decides, based on
// the cause, whether to schedule an initial-mark pause (starting a
// concurrent cycle), a standard evacuation pause, or a Full GC, and
// hands the operation to the VM thread.
void G1CollectedHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;
  {
    MutexLocker ml(Heap_lock);

    // Read the GC count while holding the Heap_lock
    gc_count_before = SharedHeap::heap()->total_collections();
    full_gc_count_before = SharedHeap::heap()->total_full_collections();
  }

  if (should_do_concurrent_full_gc(cause)) {
    // Schedule an initial-mark evacuation pause that will start a
    // concurrent cycle. We're setting word_size to 0 which means that
    // we are not requesting a post-GC allocation.
    VM_G1IncCollectionPause op(gc_count_before,
                               0,     /* word_size */
                               true,  /* should_initiate_conc_mark */
                               g1_policy()->max_pause_time_ms(),
                               cause);
    VMThread::execute(&op);
  } else {
    if (cause == GCCause::_gc_locker
        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {

      // Schedule a standard evacuation pause. We're setting word_size
      // to 0 which means that we are not requesting a post-GC allocation.
      VM_G1IncCollectionPause op(gc_count_before,
                                 0,     /* word_size */
                                 false, /* should_initiate_conc_mark */
                                 g1_policy()->max_pause_time_ms(),
                                 cause);
      VMThread::execute(&op);
    } else {
      // Schedule a Full GC.
      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
      VMThread::execute(&op);
    }
  }
}
2286 | |
2287 bool G1CollectedHeap::is_in(const void* p) const { | |
2288 if (_g1_committed.contains(p)) { | |
2289 HeapRegion* hr = _hrs->addr_to_region(p); | |
2290 return hr->is_in(p); | |
2291 } else { | |
2292 return _perm_gen->as_gen()->is_in(p); | |
2293 } | |
2294 } | |
2295 | |
2296 // Iteration functions. | |
2297 | |
2298 // Iterates an OopClosure over all ref-containing fields of objects | |
2299 // within a HeapRegion. | |
2300 | |
2301 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
2302 MemRegion _mr; | |
2303 OopClosure* _cl; | |
2304 public: | |
2305 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
2306 : _mr(mr), _cl(cl) {} | |
2307 bool doHeapRegion(HeapRegion* r) { | |
2308 if (! r->continuesHumongous()) { | |
2309 r->oop_iterate(_cl); | |
2310 } | |
2311 return false; | |
2312 } | |
2313 }; | |
2314 | |
678 | 2315 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 2316 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
2317 _hrs->iterate(&blk); | |
678 | 2318 if (do_perm) { |
2319 perm_gen()->oop_iterate(cl); | |
2320 } | |
342 | 2321 } |
2322 | |
678 | 2323 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 2324 IterateOopClosureRegionClosure blk(mr, cl); |
2325 _hrs->iterate(&blk); | |
678 | 2326 if (do_perm) { |
2327 perm_gen()->oop_iterate(cl); | |
2328 } | |
342 | 2329 } |
2330 | |
2331 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
2332 | |
2333 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
2334 ObjectClosure* _cl; | |
2335 public: | |
2336 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
2337 bool doHeapRegion(HeapRegion* r) { | |
2338 if (! r->continuesHumongous()) { | |
2339 r->object_iterate(_cl); | |
2340 } | |
2341 return false; | |
2342 } | |
2343 }; | |
2344 | |
678 | 2345 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 2346 IterateObjectClosureRegionClosure blk(cl); |
2347 _hrs->iterate(&blk); | |
678 | 2348 if (do_perm) { |
2349 perm_gen()->object_iterate(cl); | |
2350 } | |
342 | 2351 } |
2352 | |
// Not supported by G1: unconditionally fails. Callers must not rely on
// since-last-GC object iteration with this collector.
void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  // FIXME: is this right?
  guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
}
2357 | |
2358 // Calls a SpaceClosure on a HeapRegion. | |
2359 | |
// Adapts a SpaceClosure to the HeapRegionClosure interface by invoking
// do_space on each region (a HeapRegion is usable as a Space here).
class SpaceClosureRegionClosure: public HeapRegionClosure {
  SpaceClosure* _cl;
public:
  SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    _cl->do_space(r);
    return false; // never abort the iteration
  }
};
2369 | |
2370 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
2371 SpaceClosureRegionClosure blk(cl); | |
2372 _hrs->iterate(&blk); | |
2373 } | |
2374 | |
// Applies cl to every region in the heap-region sequence, in order.
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  _hrs->iterate(cl);
}
2378 | |
// Applies cl to every region, starting the (wrapping) iteration at
// region r.
void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
                                               HeapRegionClosure* cl) {
  _hrs->iterate_from(r, cl);
}
2383 | |
// Applies cl to every region, starting the (wrapping) iteration at
// region index idx.
void
G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  _hrs->iterate_from(idx, cl);
}
2388 | |
// Returns the heap region at index idx in the region sequence.
HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
2390 | |
// Parallel region iteration: each worker scans all regions (starting at
// a worker-specific offset to spread contention) and applies cl only to
// the regions it successfully claims with claim_value. "Continues
// humongous" regions are processed under their "starts humongous"
// region's claim, tails first.
void
G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                 int worker,
                                                 jint claim_value) {
  const size_t regions = n_regions();
  const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
  // try to spread out the starting points of the workers
  const size_t start_index = regions / worker_num * (size_t) worker;

  // each worker will actually look at all regions
  for (size_t count = 0; count < regions; ++count) {
    const size_t index = (start_index + count) % regions;
    assert(0 <= index && index < regions, "sanity");
    HeapRegion* r = region_at(index);
    // we'll ignore "continues humongous" regions (we'll process them
    // when we come across their corresponding "start humongous"
    // region) and regions already claimed
    if (r->claim_value() == claim_value || r->continuesHumongous()) {
      continue;
    }
    // OK, try to claim it
    if (r->claimHeapRegion(claim_value)) {
      // success!
      assert(!r->continuesHumongous(), "sanity");
      if (r->startsHumongous()) {
        // If the region is "starts humongous" we'll iterate over its
        // "continues humongous" first; in fact we'll do them
        // first. The order is important. In one case, calling the
        // closure on the "starts humongous" region might de-allocate
        // and clear all its "continues humongous" regions and, as a
        // result, we might end up processing them twice. So, we'll do
        // them first (notice: most closures will ignore them anyway) and
        // then we'll do the "starts humongous" region.
        for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
          HeapRegion* chr = region_at(ch_index);

          // if the region has already been claimed or it's not
          // "continues humongous" we're done
          if (chr->claim_value() == claim_value ||
              !chr->continuesHumongous()) {
            break;
          }

          // No one should have claimed it directly. We can assume this
          // given that we claimed its "starts humongous" region.
          assert(chr->claim_value() != claim_value, "sanity");
          assert(chr->humongous_start_region() == r, "sanity");

          if (chr->claimHeapRegion(claim_value)) {
            // we should always be able to claim it; no one else should
            // be trying to claim this region

            bool res2 = cl->doHeapRegion(chr);
            assert(!res2, "Should not abort");

            // Right now, this holds (i.e., no closure that actually
            // does something with "continues humongous" regions
            // clears them). We might have to weaken it in the future,
            // but let's leave these two asserts here for extra safety.
            assert(chr->continuesHumongous(), "should still be the case");
            assert(chr->humongous_start_region() == r, "sanity");
          } else {
            guarantee(false, "we should not reach here");
          }
        }
      }

      assert(!r->continuesHumongous(), "sanity");
      bool res = cl->doHeapRegion(r);
      assert(!res, "Should not abort");
    }
  }
}
2464 | |
// Resets every region's claim value back to InitialClaimValue so a
// subsequent parallel iteration can claim regions afresh.
class ResetClaimValuesClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->set_claim_value(HeapRegion::InitialClaimValue);
    return false; // never abort the iteration
  }
};
2472 | |
// Walks all regions, restoring each claim value to InitialClaimValue.
void
G1CollectedHeap::reset_heap_region_claim_values() {
  ResetClaimValuesClosure blk;
  heap_region_iterate(&blk);
}
2478 | |
355 | 2479 #ifdef ASSERT |
2480 // This checks whether all regions in the heap have the correct claim | |
2481 // value. I also piggy-backed on this a check to ensure that the | |
2482 // humongous_start_region() information on "continues humongous" | |
2483 // regions is correct. | |
2484 | |
// Debug-only check that every region carries the expected claim value,
// piggy-backing a check that each "continues humongous" region points
// back at the most recently seen "starts humongous" region. Violations
// are logged and counted.
class CheckClaimValuesClosure : public HeapRegionClosure {
private:
  jint _claim_value;      // the claim value every region should have
  size_t _failures;       // number of violations observed so far
  HeapRegion* _sh_region; // last "starts humongous" region encountered
public:
  CheckClaimValuesClosure(jint claim_value) :
    _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->claim_value() != _claim_value) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                             "claim value = %d, should be %d",
                             r->bottom(), r->end(), r->claim_value(),
                             _claim_value);
      ++_failures;
    }
    if (!r->isHumongous()) {
      _sh_region = NULL;
    } else if (r->startsHumongous()) {
      _sh_region = r;
    } else if (r->continuesHumongous()) {
      // A humongous tail must point back at the head we just saw.
      if (r->humongous_start_region() != _sh_region) {
        gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                               "HS = "PTR_FORMAT", should be "PTR_FORMAT,
                               r->bottom(), r->end(),
                               r->humongous_start_region(),
                               _sh_region);
        ++_failures;
      }
    }
    return false; // iterate over all regions
  }
  size_t failures() {
    return _failures;
  }
};
2521 | |
2522 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
2523 CheckClaimValuesClosure cl(claim_value); | |
2524 heap_region_iterate(&cl); | |
2525 return cl.failures() == 0; | |
2526 } | |
2527 #endif // ASSERT | |
342 | 2528 |
2529 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2530 HeapRegion* r = g1_policy()->collection_set(); | |
2531 while (r != NULL) { | |
2532 HeapRegion* next = r->next_in_collection_set(); | |
2533 if (cl->doHeapRegion(r)) { | |
2534 cl->incomplete(); | |
2535 return; | |
2536 } | |
2537 r = next; | |
2538 } | |
2539 } | |
2540 | |
// Applies cl to every region in the collection set, starting at region
// r and wrapping around through the regions that precede r in the list.
void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                  HeapRegionClosure *cl) {
  if (r == NULL) {
    // The CSet is empty so there's nothing to do.
    return;
  }

  assert(r->in_collection_set(),
         "Start region must be a member of the collection set.");
  HeapRegion* cur = r;
  while (cur != NULL) {
    HeapRegion* next = cur->next_in_collection_set();
    // NOTE(review): "&& false" makes the early-termination path dead
    // code -- the closure's result is effectively ignored here; confirm
    // whether that is intentional.
    if (cl->doHeapRegion(cur) && false) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
  // Wrap around: process the regions from the head of the CSet up to r.
  cur = g1_policy()->collection_set();
  while (cur != r) {
    HeapRegion* next = cur->next_in_collection_set();
    if (cl->doHeapRegion(cur) && false) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
}
2569 | |
2570 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
2571 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
2572 } | |
2573 | |
2574 | |
2575 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2576 Space* res = heap_region_containing(addr); | |
2577 if (res == NULL) | |
2578 res = perm_gen()->space_containing(addr); | |
2579 return res; | |
2580 } | |
2581 | |
2582 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2583 Space* sp = space_containing(addr); | |
2584 if (sp != NULL) { | |
2585 return sp->block_start(addr); | |
2586 } | |
2587 return NULL; | |
2588 } | |
2589 | |
// Returns the size of the block starting at addr; addr must lie inside
// the heap (asserted via the containing-space lookup).
size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  Space* sp = space_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}
2595 | |
2596 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2597 Space* sp = space_containing(addr); | |
2598 return sp->block_is_obj(addr); | |
2599 } | |
2600 | |
// G1 supports thread-local allocation buffers.
bool G1CollectedHeap::supports_tlab_allocation() const {
  return true;
}
2604 | |
// TLAB capacity is reported as one region's worth of bytes, regardless
// of the requesting thread.
size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  return HeapRegion::GrainBytes;
}
2608 | |
// Upper bound on a TLAB allocation that can succeed without a slow-path
// heap operation.
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  // Return the remaining space in the cur alloc region, but not less than
  // the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = _mutator_alloc_region.get();
  size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
  if (hr == NULL) {
    // No active mutator alloc region: report the largest legal TLAB.
    return max_tlab_size;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
  }
}
2625 | |
// Threshold (in words) above which a type array is considered large;
// currently one region's worth of words. FIXME retained from original.
size_t G1CollectedHeap::large_typearray_limit() {
  // FIXME
  return HeapRegion::GrainBytes/HeapWordSize;
}
2630 | |
// Maximum heap capacity: the byte size of the entire reserved (not
// merely committed) G1 heap space.
size_t G1CollectedHeap::max_capacity() const {
  return _g1_reserved.byte_size();
}
2634 | |
// Not implemented for G1: always reports zero milliseconds.
jlong G1CollectedHeap::millis_since_last_gc() {
  // assert(false, "NYI");
  return 0;
}
2639 | |
2640 void G1CollectedHeap::prepare_for_verify() { | |
2641 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2642 ensure_parsability(false); | |
2643 } | |
2644 g1_rem_set()->prepare_for_verify(); | |
2645 } | |
2646 | |
// Verification closure: for each oop field visited, the referenced
// object must be NULL or not dead -- i.e., no live object may reference
// a dead one.
class VerifyLivenessOopClosure: public OopClosure {
  G1CollectedHeap* g1h;
public:
  VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
    g1h = _g1h;
  }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  // Shared implementation for both narrow and full-width oops.
  template <class T> void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    guarantee(obj == NULL || !g1h->is_obj_dead(obj),
              "Dead object referenced by a not dead object");
  }
};
2662 | |
// Verification closure: checks the liveness invariant for every not-dead
// object in a region and accumulates the region's live byte count for
// comparison against the region's own accounting.
class VerifyObjsInRegionClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  size_t _live_bytes;       // accumulated size of live objects seen
  HeapRegion *_hr;
  bool _use_prev_marking;
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
    : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
    _g1h = G1CollectedHeap::heap();
  }
  void do_object(oop o) {
    VerifyLivenessOopClosure isLive(_g1h);
    assert(o != NULL, "Huh?");
    if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
      o->oop_iterate(&isLive);
      // Only objects present at the previous marking count as live
      // bytes; newer allocations are implicitly live.
      if (!_hr->obj_allocated_since_prev_marking(o)) {
        size_t obj_size = o->size();    // Make sure we don't overflow
        _live_bytes += (obj_size * HeapWordSize);
      }
    }
  }
  size_t live_bytes() { return _live_bytes; }
};
2689 | |
// Debugging aid: prints every object in a region -- address, size, and
// marking state -- followed by a word-by-word dump of its raw contents.
class PrintObjsInRegionClosure : public ObjectClosure {
  HeapRegion *_hr;
  G1CollectedHeap *_g1;
public:
  PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
    _g1 = G1CollectedHeap::heap();
  };

  void do_object(oop o) {
    if (o != NULL) {
      HeapWord *start = (HeapWord *) o;
      size_t word_sz = o->size();
      gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
                          " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
                          (void*) o, word_sz,
                          _g1->isMarkedPrev(o),
                          _g1->isMarkedNext(o),
                          _hr->obj_allocated_since_prev_marking(o));
      HeapWord *end = start + word_sz;
      HeapWord *cur;
      int *val;
      // Dump the raw contents of the object, one word per line.
      for (cur = start; cur < end; cur++) {
        val = (int *) cur;
        gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
      }
    }
  }
};
2718 | |
// Per-region verification: runs the region's own verify(), then (if that
// passes) recomputes live bytes via VerifyObjsInRegionClosure and checks
// that the region's max_live_bytes accounting is not an underestimate.
// Failures are accumulated rather than aborting immediately.
class VerifyRegionClosure: public HeapRegionClosure {
private:
  bool _allow_dirty;
  bool _par;              // true when invoked from a parallel context
  bool _use_prev_marking;
  bool _failures;         // sticky: set once any region fails
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
    : _allow_dirty(allow_dirty),
      _par(par),
      _use_prev_marking(use_prev_marking),
      _failures(false) {}

  bool failures() {
    return _failures;
  }

  bool doHeapRegion(HeapRegion* r) {
    // Outside of parallel iteration, regions must be unclaimed.
    guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
              "Should be unclaimed at verify points.");
    if (!r->continuesHumongous()) {
      bool failures = false;
      r->verify(_allow_dirty, _use_prev_marking, &failures);
      if (failures) {
        _failures = true;
      } else {
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
        r->object_iterate(&not_dead_yet_cl);
        if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
          gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
                                 "max_live_bytes "SIZE_FORMAT" "
                                 "< calculated "SIZE_FORMAT,
                                 r->bottom(), r->end(),
                                 r->max_live_bytes(),
                                 not_dead_yet_cl.live_bytes());
          _failures = true;
        }
      }
    }
    return false; // never abort -- failures are recorded, not fatal here
  }
};
2763 | |
2764 class VerifyRootsClosure: public OopsInGenClosure { | |
2765 private: | |
2766 G1CollectedHeap* _g1h; | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2767 bool _use_prev_marking; |
342 | 2768 bool _failures; |
2769 public: | |
811 | 2770 // use_prev_marking == true -> use "prev" marking information, |
2771 // use_prev_marking == false -> use "next" marking information | |
2772 VerifyRootsClosure(bool use_prev_marking) : | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2773 _g1h(G1CollectedHeap::heap()), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2774 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2775 _failures(false) { } |
342 | 2776 |
2777 bool failures() { return _failures; } | |
2778 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2779 template <class T> void do_oop_nv(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2780 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2781 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2782 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
811 | 2783 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { |
342 | 2784 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2785 "points to dead obj "PTR_FORMAT, p, (void*) obj); |
342 | 2786 obj->print_on(gclog_or_tty); |
2787 _failures = true; | |
2788 } | |
2789 } | |
2790 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2791 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2792 void do_oop(oop* p) { do_oop_nv(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2793 void do_oop(narrowOop* p) { do_oop_nv(p); } |
342 | 2794 }; |
2795 | |
390 | 2796 // This is the task used for parallel heap verification. |
2797 | |
2798 class G1ParVerifyTask: public AbstractGangTask { | |
2799 private: | |
2800 G1CollectedHeap* _g1h; | |
2801 bool _allow_dirty; | |
811 | 2802 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2803 bool _failures; |
390 | 2804 |
2805 public: | |
811 | 2806 // use_prev_marking == true -> use "prev" marking information, |
2807 // use_prev_marking == false -> use "next" marking information | |
2808 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, | |
2809 bool use_prev_marking) : | |
390 | 2810 AbstractGangTask("Parallel verify task"), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2811 _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2812 _allow_dirty(allow_dirty), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2813 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2814 _failures(false) { } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2815 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2816 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2817 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2818 } |
390 | 2819 |
2820 void work(int worker_i) { | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2821 HandleMark hm; |
811 | 2822 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); |
390 | 2823 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
2824 HeapRegion::ParVerifyClaimValue); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2825 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2826 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2827 } |
390 | 2828 } |
2829 }; | |
2830 | |
342 | 2831 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
811 | 2832 verify(allow_dirty, silent, /* use_prev_marking */ true); |
2833 } | |
2834 | |
2835 void G1CollectedHeap::verify(bool allow_dirty, | |
2836 bool silent, | |
2837 bool use_prev_marking) { | |
342 | 2838 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
3293
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2839 if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); } |
811 | 2840 VerifyRootsClosure rootsCl(use_prev_marking); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2841 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
3293
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2842 // We apply the relevant closures to all the oops in the |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2843 // system dictionary, the string table and the code cache. |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2844 const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2845 process_strong_roots(true, // activate StrongRootsScope |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2846 true, // we set "collecting perm gen" to true, |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2847 // so we don't reset the dirty cards in the perm gen. |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2848 SharedHeap::ScanningOption(so), // roots scanning options |
342 | 2849 &rootsCl, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2850 &blobsCl, |
342 | 2851 &rootsCl); |
3293
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2852 // Since we used "collecting_perm_gen" == true above, we will not have |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2853 // checked the refs from perm into the G1-collected heap. We check those |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2854 // references explicitly below. Whether the relevant cards are dirty |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2855 // is checked further below in the rem set verification. |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2856 if (!silent) { gclog_or_tty->print("Permgen roots "); } |
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
3289
diff
changeset
|
2857 perm_gen()->oop_iterate(&rootsCl); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2858 bool failures = rootsCl.failures(); |
2152 | 2859 if (!silent) { gclog_or_tty->print("HeapRegionSets "); } |
2860 verify_region_sets(); | |
2861 if (!silent) { gclog_or_tty->print("HeapRegions "); } | |
390 | 2862 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2863 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2864 "sanity check"); | |
2865 | |
811 | 2866 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
390 | 2867 int n_workers = workers()->total_workers(); |
2868 set_par_threads(n_workers); | |
2869 workers()->run_task(&task); | |
2870 set_par_threads(0); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2871 if (task.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2872 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2873 } |
390 | 2874 |
2875 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2876 "sanity check"); | |
2877 | |
2878 reset_heap_region_claim_values(); | |
2879 | |
2880 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2881 "sanity check"); | |
2882 } else { | |
811 | 2883 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
390 | 2884 _hrs->iterate(&blk); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2885 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2886 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2887 } |
390 | 2888 } |
2152 | 2889 if (!silent) gclog_or_tty->print("RemSet "); |
342 | 2890 rem_set()->verify(); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2891 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2892 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2893 gclog_or_tty->print_cr("Heap:"); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2894 print_on(gclog_or_tty, true /* extended */); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2895 gclog_or_tty->print_cr(""); |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2896 #ifndef PRODUCT |
1044 | 2897 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
1388 | 2898 concurrent_mark()->print_reachable("at-verification-failure", |
2899 use_prev_marking, false /* all */); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2900 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2901 #endif |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2902 gclog_or_tty->flush(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2903 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2904 guarantee(!failures, "there should not have been any failures"); |
342 | 2905 } else { |
2906 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2907 } | |
2908 } | |
2909 | |
2910 class PrintRegionClosure: public HeapRegionClosure { | |
2911 outputStream* _st; | |
2912 public: | |
2913 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2914 bool doHeapRegion(HeapRegion* r) { | |
2915 r->print_on(_st); | |
2916 return false; | |
2917 } | |
2918 }; | |
2919 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2920 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 2921 |
2922 void G1CollectedHeap::print_on(outputStream* st) const { | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2923 print_on(st, PrintHeapAtGCExtended); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2924 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2925 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2926 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2927 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2928 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2929 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2930 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2931 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2932 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2933 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2934 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2935 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2936 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2937 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2938 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2939 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2940 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2941 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2942 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2943 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2944 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2945 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2946 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2947 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2948 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2949 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2950 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2951 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 2952 PrintRegionClosure blk(st); |
2953 _hrs->iterate(&blk); | |
2954 } | |
2955 | |
2956 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2957 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1019 | 2958 workers()->print_worker_threads_on(st); |
2959 } | |
2960 _cmThread->print_on(st); | |
342 | 2961 st->cr(); |
1019 | 2962 _cm->print_worker_threads_on(st); |
2963 _cg1r->print_worker_threads_on(st); | |
342 | 2964 st->cr(); |
2965 } | |
2966 | |
2967 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2968 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 2969 workers()->threads_do(tc); |
2970 } | |
2971 tc->do_thread(_cmThread); | |
794 | 2972 _cg1r->threads_do(tc); |
342 | 2973 } |
2974 | |
2975 void G1CollectedHeap::print_tracing_info() const { | |
2976 // We'll overload this to mean "trace GC pause statistics." | |
2977 if (TraceGen0Time || TraceGen1Time) { | |
2978 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2979 // to that. | |
2980 g1_policy()->print_tracing_info(); | |
2981 } | |
751 | 2982 if (G1SummarizeRSetStats) { |
342 | 2983 g1_rem_set()->print_summary_info(); |
2984 } | |
1282 | 2985 if (G1SummarizeConcMark) { |
342 | 2986 concurrent_mark()->print_summary_info(); |
2987 } | |
2988 g1_policy()->print_yg_surv_rate_info(); | |
2989 SpecializationStats::print(); | |
2990 } | |
2991 | |
2992 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2993 HeapRegion* hr = heap_region_containing(addr); | |
2994 if (hr == NULL) { | |
2995 return 0; | |
2996 } else { | |
2997 return 1; | |
2998 } | |
2999 } | |
3000 | |
3001 G1CollectedHeap* G1CollectedHeap::heap() { | |
3002 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
3003 "not a garbage-first heap"); | |
3004 return _g1h; | |
3005 } | |
3006 | |
3007 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
3008 // always_do_update_barrier = false; |
342 | 3009 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
3010 // Call allocation profiler | |
3011 AllocationProfiler::iterate_since_last_gc(); | |
3012 // Fill TLAB's and such | |
3013 ensure_parsability(true); | |
3014 } | |
3015 | |
3016 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
3017 // FIXME: what is this about? | |
3018 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
3019 // is set. | |
3020 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
3021 "derived pointer present")); | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
3022 // always_do_update_barrier = true; |
342 | 3023 } |
3024 | |
1973 | 3025 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, |
3026 unsigned int gc_count_before, | |
3027 bool* succeeded) { | |
3028 assert_heap_not_locked_and_not_at_safepoint(); | |
342 | 3029 g1_policy()->record_stop_world_start(); |
1973 | 3030 VM_G1IncCollectionPause op(gc_count_before, |
3031 word_size, | |
3032 false, /* should_initiate_conc_mark */ | |
3033 g1_policy()->max_pause_time_ms(), | |
3034 GCCause::_g1_inc_collection_pause); | |
3035 VMThread::execute(&op); | |
3036 | |
3037 HeapWord* result = op.result(); | |
3038 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded(); | |
3039 assert(result == NULL || ret_succeeded, | |
3040 "the result should be NULL if the VM did not succeed"); | |
3041 *succeeded = ret_succeeded; | |
3042 | |
3043 assert_heap_not_locked(); | |
3044 return result; | |
342 | 3045 } |
3046 | |
3047 void | |
3048 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3049 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3050 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3051 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3052 CGC_lock->notify(); |
342 | 3053 } |
3054 } | |
3055 | |
3056 class VerifyMarkedObjsClosure: public ObjectClosure { | |
3057 G1CollectedHeap* _g1h; | |
3058 public: | |
3059 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
3060 void do_object(oop obj) { | |
3061 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
3062 "markandsweep mark should agree with concurrent deadness"); | |
3063 } | |
3064 }; | |
3065 | |
3066 void | |
3067 G1CollectedHeap::checkConcurrentMark() { | |
3068 VerifyMarkedObjsClosure verifycl(this); | |
3069 // MutexLockerEx x(getMarkBitMapLock(), | |
3070 // Mutex::_no_safepoint_check_flag); | |
678 | 3071 object_iterate(&verifycl, false); |
342 | 3072 } |
3073 | |
3074 void G1CollectedHeap::do_sync_mark() { | |
3075 _cm->checkpointRootsInitial(); | |
3076 _cm->markFromRoots(); | |
3077 _cm->checkpointRootsFinal(false); | |
3078 } | |
3079 | |
3080 // <NEW PREDICTION> | |
3081 | |
3082 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
3083 bool young) { | |
3084 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
3085 } | |
3086 | |
3087 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
3088 predicted_time_ms) { | |
3089 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
3090 } | |
3091 | |
3092 size_t G1CollectedHeap::pending_card_num() { | |
3093 size_t extra_cards = 0; | |
3094 JavaThread *curr = Threads::first(); | |
3095 while (curr != NULL) { | |
3096 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
3097 extra_cards += dcq.size(); | |
3098 curr = curr->next(); | |
3099 } | |
3100 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3101 size_t buffer_size = dcqs.buffer_size(); | |
3102 size_t buffer_num = dcqs.completed_buffers_num(); | |
3103 return buffer_size * buffer_num + extra_cards; | |
3104 } | |
3105 | |
3106 size_t G1CollectedHeap::max_pending_card_num() { | |
3107 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3108 size_t buffer_size = dcqs.buffer_size(); | |
3109 size_t buffer_num = dcqs.completed_buffers_num(); | |
3110 int thread_num = Threads::number_of_threads(); | |
3111 return (buffer_num + thread_num) * buffer_size; | |
3112 } | |
3113 | |
3114 size_t G1CollectedHeap::cards_scanned() { | |
1861 | 3115 return g1_rem_set()->cardsScanned(); |
342 | 3116 } |
3117 | |
3118 void | |
3119 G1CollectedHeap::setup_surviving_young_words() { | |
3120 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
3121 size_t array_length = g1_policy()->young_cset_length(); | |
3122 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3123 if (_surviving_young_words == NULL) { | |
3124 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
3125 "Not enough space for young surv words summary."); | |
3126 } | |
3127 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3128 #ifdef ASSERT |
342 | 3129 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3130 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3131 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3132 #endif // !ASSERT |
342 | 3133 } |
3134 | |
3135 void | |
3136 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
3137 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3138 size_t array_length = g1_policy()->young_cset_length(); | |
3139 for (size_t i = 0; i < array_length; ++i) | |
3140 _surviving_young_words[i] += surv_young_words[i]; | |
3141 } | |
3142 | |
3143 void | |
3144 G1CollectedHeap::cleanup_surviving_young_words() { | |
3145 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
3146 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
3147 _surviving_young_words = NULL; | |
3148 } | |
3149 | |
3150 // </NEW PREDICTION> | |
3151 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3152 struct PrepareForRSScanningClosure : public HeapRegionClosure { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3153 bool doHeapRegion(HeapRegion *r) { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3154 r->rem_set()->set_iter_claimed(0); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3155 return false; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3156 } |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3157 }; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3158 |
1709 | 3159 #if TASKQUEUE_STATS |
3160 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { | |
3161 st->print_raw_cr("GC Task Stats"); | |
3162 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); | |
3163 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); | |
3164 } | |
3165 | |
3166 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { | |
3167 print_taskqueue_stats_hdr(st); | |
3168 | |
3169 TaskQueueStats totals; | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3170 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3171 for (int i = 0; i < n; ++i) { |
3172 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); | |
3173 totals += task_queue(i)->stats; | |
3174 } | |
3175 st->print_raw("tot "); totals.print(st); st->cr(); | |
3176 | |
3177 DEBUG_ONLY(totals.verify()); | |
3178 } | |
3179 | |
3180 void G1CollectedHeap::reset_taskqueue_stats() { | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3181 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3182 for (int i = 0; i < n; ++i) { |
3183 task_queue(i)->stats.reset(); | |
3184 } | |
3185 } | |
3186 #endif // TASKQUEUE_STATS | |
3187 | |
1973 | 3188 bool |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3189 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
2152 | 3190 assert_at_safepoint(true /* should_be_vm_thread */); |
3191 guarantee(!is_gc_active(), "collection is not reentrant"); | |
3192 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3193 if (GC_locker::check_active_before_gc()) { |
1973 | 3194 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3195 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3196 |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2039
diff
changeset
|
3197 SvcGCMarker sgcm(SvcGCMarker::MINOR); |
2039
7c5250dbd584
6896624: G1: hotspot:::gc and hotspot:::mem-pool-gc probes are not fired
tonyp
parents:
2038
diff
changeset
|
3198 ResourceMark rm; |
7c5250dbd584
6896624: G1: hotspot:::gc and hotspot:::mem-pool-gc probes are not fired
tonyp
parents:
2038
diff
changeset
|
3199 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3200 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3201 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3202 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3203 |
2152 | 3204 verify_region_sets_optional(); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3205 verify_dirty_young_regions(); |
2152 | 3206 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3207 { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3208 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3209 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3210 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3211 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3212 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3213 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3214 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3215 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3216 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3217 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3218 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3219 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3220 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3221 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3222 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3223 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3224 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3225 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3226 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3227 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3228 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3229 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3230 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3231 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3232 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3233 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3234 |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
3235 TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); |
3356
78542e2b5e35
7036199: Adding a notification to the implementation of GarbageCollectorMXBeans
fparain
parents:
3323
diff
changeset
|
3236 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3237 |
2361 | 3238 // If the secondary_free_list is not empty, append it to the |
3239 // free_list. No need to wait for the cleanup operation to finish; | |
3240 // the region allocation code will check the secondary_free_list | |
3241 // and wait if necessary. If the G1StressConcRegionFreeing flag is | |
3242 // set, skip this step so that the region allocation code has to | |
3243 // get entries from the secondary_free_list. | |
2152 | 3244 if (!G1StressConcRegionFreeing) { |
2361 | 3245 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 3246 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3247 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3248 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3249 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3250 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3251 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3252 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3253 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3254 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3255 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3256 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3257 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3258 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3259 increment_total_collections(false /* full gc */); |
342 | 3260 |
3261 #if G1_REM_SET_LOGGING | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3262 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3263 print(); |
342 | 3264 #endif |
3265 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3266 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3267 HandleMark hm; // Discard invalid handles created during verification |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3268 gclog_or_tty->print(" VerifyBeforeGC:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3269 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3270 Universe::verify(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3271 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3272 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3273 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3274 |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3275 // Please see comment in G1CollectedHeap::ref_processing_init() |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3276 // to see how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3277 // |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3278 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3279 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3280 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3281 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3282 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3283 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3284 // of the collection set!). |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3285 release_mutator_alloc_region(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3286 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3287 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3288 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3289 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3290 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3291 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3292 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3293 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3294 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3295 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3296 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3297 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3298 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3299 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3300 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3301 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3302 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3303 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3304 #endif // YOUNG_LIST_VERBOSE |
342 | 3305 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3306 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3307 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3308 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3309 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3310 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3311 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3312 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3313 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3314 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3315 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3316 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3317 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3318 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3319 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3320 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3321 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3322 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3323 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3324 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3325 |
3378
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3326 if (mark_in_progress()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3327 concurrent_mark()->newCSet(); |
3378
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3328 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3329 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3330 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3331 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3332 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3333 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3334 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3335 |
1707 | 3336 g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3337 |
3378
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3338 // We have chosen the complete collection set. If marking is |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3339 // active then, we clear the region fields of any of the |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3340 // concurrent marking tasks whose region fields point into |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3341 // the collection set as these values will become stale. This |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3342 // will cause the owning marking threads to claim a new region |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3343 // when marking restarts. |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3344 if (mark_in_progress()) { |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3345 concurrent_mark()->reset_active_task_region_fields_in_cset(); |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3346 } |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3347 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3348 // Nothing to do if we were unable to choose a collection set. |
342 | 3349 #if G1_REM_SET_LOGGING |
1707 | 3350 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
3351 print(); | |
342 | 3352 #endif |
1707 | 3353 PrepareForRSScanningClosure prepare_for_rs_scan; |
3354 collection_set_iterate(&prepare_for_rs_scan); | |
3355 | |
3356 setup_surviving_young_words(); | |
3357 | |
3358 // Set up the gc allocation regions. | |
3359 get_gc_alloc_regions(); | |
3360 | |
3361 // Actually do the work... | |
3362 evacuate_collection_set(); | |
3363 | |
3364 free_collection_set(g1_policy()->collection_set()); | |
3365 g1_policy()->clear_collection_set(); | |
3366 | |
3367 cleanup_surviving_young_words(); | |
3368 | |
3369 // Start a new incremental collection set for the next pause. | |
3370 g1_policy()->start_incremental_cset_building(); | |
3371 | |
3372 // Clear the _cset_fast_test bitmap in anticipation of adding | |
3373 // regions to the incremental collection set for the next | |
3374 // evacuation pause. | |
3375 clear_cset_fast_test(); | |
3376 | |
3377 if (g1_policy()->in_young_gc_mode()) { | |
3378 _young_list->reset_sampled_info(); | |
3379 | |
3380 // Don't check the whole heap at this point as the | |
3381 // GC alloc regions from this pause have been tagged | |
3382 // as survivors and moved on to the survivor list. | |
3383 // Survivor regions will fail the !is_young() check. | |
3384 assert(check_young_list_empty(false /* check_heap */), | |
3385 "young list should be empty"); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3386 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3387 #if YOUNG_LIST_VERBOSE |
1707 | 3388 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
3389 _young_list->print(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3390 #endif // YOUNG_LIST_VERBOSE |
342 | 3391 |
1707 | 3392 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3393 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3394 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3395 |
1707 | 3396 _young_list->reset_auxilary_lists(); |
342 | 3397 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3398 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3399 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3400 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3401 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3402 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3403 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3404 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3405 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3406 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3407 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3408 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3409 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3410 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3411 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3412 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3413 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3414 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3415 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3416 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3417 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3418 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3419 } |
342 | 3420 |
3285
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
3421 allocate_dummy_regions(); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
3422 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3423 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3424 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3425 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3426 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3427 #endif // YOUNG_LIST_VERBOSE |
342 | 3428 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3429 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3430 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3431 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3432 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3433 g1_policy()->record_pause_time_ms(pause_time_ms); |
1707 | 3434 g1_policy()->record_collection_pause_end(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3435 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3436 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3437 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3438 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3439 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3440 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3441 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3442 Universe::verify(false); |
342 | 3443 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3444 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3445 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3446 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3447 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3448 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3449 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3450 size_t bytes_before = capacity(); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3451 if (!expand(expand_bytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3452 // We failed to expand the heap so let's verify that |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3453 // committed/uncommitted amount match the backing store |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3454 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3455 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3456 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3457 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3458 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3459 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3460 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3461 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3462 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3463 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3464 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3465 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3466 #endif |
342 | 3467 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3468 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3469 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3470 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3471 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3472 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3473 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3474 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3475 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3476 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3477 |
2152 | 3478 verify_region_sets_optional(); |
3479 | |
1709 | 3480 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
3481 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | |
3482 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3483 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3484 Universe::print_heap_after_gc(); |
342 | 3485 } |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
3486 g1mm()->update_counters(); |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
3487 |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3488 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3489 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3490 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3491 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3492 } |
1973 | 3493 |
3494 return true; | |
342 | 3495 } |
3496 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3497 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3498 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3499 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3500 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3501 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3502 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3503 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3504 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3505 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3506 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3507 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3508 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3509 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3510 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3511 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3512 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3513 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3514 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3515 void G1CollectedHeap::init_mutator_alloc_region() { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3516 assert(_mutator_alloc_region.get() == NULL, "pre-condition"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3517 _mutator_alloc_region.init(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3518 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3519 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3520 void G1CollectedHeap::release_mutator_alloc_region() { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3521 _mutator_alloc_region.release(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3522 assert(_mutator_alloc_region.get() == NULL, "post-condition"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3523 } |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3524 |
342 | 3525 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
3526 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 3527 // make sure we don't call set_gc_alloc_region() multiple times on |
3528 // the same region | |
3529 assert(r == NULL || !r->is_gc_alloc_region(), | |
3530 "shouldn't already be a GC alloc region"); | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3531 assert(r == NULL || !r->isHumongous(), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3532 "humongous regions shouldn't be used as GC alloc regions"); |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3533 |
342 | 3534 HeapWord* original_top = NULL; |
3535 if (r != NULL) | |
3536 original_top = r->top(); | |
3537 | |
3538 // We will want to record the used space in r as being there before gc. | |
3539 // One we install it as a GC alloc region it's eligible for allocation. | |
3540 // So record it now and use it later. | |
3541 size_t r_used = 0; | |
3542 if (r != NULL) { | |
3543 r_used = r->used(); | |
3544 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
3545 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 3546 // need to take the lock to guard against two threads calling |
3547 // get_gc_alloc_region concurrently (very unlikely but...) | |
3548 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3549 r->save_marks(); | |
3550 } | |
3551 } | |
3552 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
3553 _gc_alloc_regions[purpose] = r; | |
3554 if (old_alloc_region != NULL) { | |
3555 // Replace aliases too. | |
3556 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3557 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
3558 _gc_alloc_regions[ap] = r; | |
3559 } | |
3560 } | |
3561 } | |
3562 if (r != NULL) { | |
3563 push_gc_alloc_region(r); | |
3564 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
3565 // We are using a region as a GC alloc region after it has been used | |
3566 // as a mutator allocation region during the current marking cycle. | |
3567 // The mutator-allocated objects are currently implicitly marked, but | |
3568 // when we move hr->next_top_at_mark_start() forward at the the end | |
3569 // of the GC pause, they won't be. We therefore mark all objects in | |
3570 // the "gap". We do this object-by-object, since marking densely | |
3571 // does not currently work right with marking bitmap iteration. This | |
3572 // means we rely on TLAB filling at the start of pauses, and no | |
3573 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
3574 // to fix the marking bitmap iteration. | |
3575 HeapWord* curhw = r->next_top_at_mark_start(); | |
3576 HeapWord* t = original_top; | |
3577 | |
3578 while (curhw < t) { | |
3579 oop cur = (oop)curhw; | |
3580 // We'll assume parallel for generality. This is rare code. | |
3581 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
3582 curhw = curhw + cur->size(); | |
3583 } | |
3584 assert(curhw == t, "Should have parsed correctly."); | |
3585 } | |
3586 if (G1PolicyVerbose > 1) { | |
3587 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
3588 "for survivors:", r->bottom(), original_top, r->end()); | |
3589 r->print(); | |
3590 } | |
3591 g1_policy()->record_before_bytes(r_used); | |
3592 } | |
3593 } | |
3594 | |
3595 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
3596 assert(Thread::current()->is_VM_thread() || | |
2152 | 3597 FreeList_lock->owned_by_self(), "Precondition"); |
342 | 3598 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), |
3599 "Precondition."); | |
3600 hr->set_is_gc_alloc_region(true); | |
3601 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
3602 _gc_alloc_region_list = hr; | |
3603 } | |
3604 | |
#ifdef G1_DEBUG
// Debug-only closure: logs any region that is still tagged as a GC alloc
// region. Always returns false so the heap-region iteration continues.
class FindGCAllocRegion: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_gc_alloc_region()) {
      gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
                             r->hrs_index(), r->bottom());
    }
    return false;
  }
};
#endif // G1_DEBUG
3617 | |
3618 void G1CollectedHeap::forget_alloc_region_list() { | |
2152 | 3619 assert_at_safepoint(true /* should_be_vm_thread */); |
342 | 3620 while (_gc_alloc_region_list != NULL) { |
3621 HeapRegion* r = _gc_alloc_region_list; | |
3622 assert(r->is_gc_alloc_region(), "Invariant."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3623 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3624 // newly allocated data in order to be able to apply deferred updates |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3625 // before the GC is done for verification purposes (i.e to allow |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3626 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3627 // collection. |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3628 r->ContiguousSpace::set_saved_mark(); |
342 | 3629 _gc_alloc_region_list = r->next_gc_alloc_region(); |
3630 r->set_next_gc_alloc_region(NULL); | |
3631 r->set_is_gc_alloc_region(false); | |
545 | 3632 if (r->is_survivor()) { |
3633 if (r->is_empty()) { | |
3634 r->set_not_young(); | |
3635 } else { | |
3636 _young_list->add_survivor_region(r); | |
3637 } | |
3638 } | |
342 | 3639 } |
3640 #ifdef G1_DEBUG | |
3641 FindGCAllocRegion fa; | |
3642 heap_region_iterate(&fa); | |
3643 #endif // G1_DEBUG | |
3644 } | |
3645 | |
3646 | |
3647 bool G1CollectedHeap::check_gc_alloc_regions() { | |
3648 // TODO: allocation regions check | |
3649 return true; | |
3650 } | |
3651 | |
3652 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 3653 // First, let's check that the GC alloc region list is empty (it should) |
3654 assert(_gc_alloc_region_list == NULL, "invariant"); | |
3655 | |
342 | 3656 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 3657 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3658 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
636 | 3659 |
342 | 3660 // Create new GC alloc regions. |
636 | 3661 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
3662 _retained_gc_alloc_regions[ap] = NULL; | |
3663 | |
3664 if (alloc_region != NULL) { | |
3665 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
3666 | |
3667 // let's make sure that the GC alloc region is not tagged as such | |
3668 // outside a GC operation | |
3669 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
3670 | |
3671 if (alloc_region->in_collection_set() || | |
3672 alloc_region->top() == alloc_region->end() || | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3673 alloc_region->top() == alloc_region->bottom() || |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3674 alloc_region->isHumongous()) { |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3675 // we will discard the current GC alloc region if |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3676 // * it's in the collection set (it can happen!), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3677 // * it's already full (no point in using it), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3678 // * it's empty (this means that it was emptied during |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3679 // a cleanup and it should be on the free list now), or |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3680 // * it's humongous (this means that it was emptied |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3681 // during a cleanup and was added to the free list, but |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3682 // has been subseqently used to allocate a humongous |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3683 // object that may be less than the region size). |
636 | 3684 |
3685 alloc_region = NULL; | |
3686 } | |
3687 } | |
3688 | |
3689 if (alloc_region == NULL) { | |
3690 // we will get a new GC alloc region | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3691 alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3692 } else { |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3693 // the region was retained from the last collection |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3694 ++_gc_alloc_region_counts[ap]; |
1388 | 3695 if (G1PrintHeapRegions) { |
3696 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
3697 "top "PTR_FORMAT, | |
3698 alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); | |
3699 } | |
342 | 3700 } |
636 | 3701 |
342 | 3702 if (alloc_region != NULL) { |
636 | 3703 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 3704 set_gc_alloc_region(ap, alloc_region); |
3705 } | |
636 | 3706 |
3707 assert(_gc_alloc_regions[ap] == NULL || | |
3708 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
3709 "the GC alloc region should be tagged as such"); | |
3710 assert(_gc_alloc_regions[ap] == NULL || | |
3711 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
3712 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 3713 } |
3714 // Set alternative regions for allocation purposes that have reached | |
636 | 3715 // their limit. |
342 | 3716 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
3717 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
3718 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
3719 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
3720 } | |
3721 } | |
3722 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3723 } | |
3724 | |
636 | 3725 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 3726 // We keep a separate list of all regions that have been alloc regions in |
636 | 3727 // the current collection pause. Forget that now. This method will |
3728 // untag the GC alloc regions and tear down the GC alloc region | |
3729 // list. It's desirable that no regions are tagged as GC alloc | |
3730 // outside GCs. | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3731 |
342 | 3732 forget_alloc_region_list(); |
3733 | |
3734 // The current alloc regions contain objs that have survived | |
3735 // collection. Make them no longer GC alloc regions. | |
3736 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3737 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 3738 _retained_gc_alloc_regions[ap] = NULL; |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3739 _gc_alloc_region_counts[ap] = 0; |
636 | 3740 |
3741 if (r != NULL) { | |
3742 // we retain nothing on _gc_alloc_regions between GCs | |
3743 set_gc_alloc_region(ap, NULL); | |
3744 | |
3745 if (r->is_empty()) { | |
2152 | 3746 // We didn't actually allocate anything in it; let's just put |
3747 // it back on the free list. | |
2432
455328d90876
7029458: G1: Add newly-reclaimed regions to the beginning of the region free list, not the end
tonyp
parents:
2369
diff
changeset
|
3748 _free_list.add_as_head(r); |
636 | 3749 } else if (_retain_gc_alloc_region[ap] && !totally) { |
3750 // retain it so that we can use it at the beginning of the next GC | |
3751 _retained_gc_alloc_regions[ap] = r; | |
342 | 3752 } |
3753 } | |
636 | 3754 } |
3755 } | |
3756 | |
3757 #ifndef PRODUCT | |
3758 // Useful for debugging | |
3759 | |
3760 void G1CollectedHeap::print_gc_alloc_regions() { | |
3761 gclog_or_tty->print_cr("GC alloc regions"); | |
3762 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3763 HeapRegion* r = _gc_alloc_regions[ap]; | |
3764 if (r == NULL) { | |
3765 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3766 } else { | |
3767 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3768 ap, r->bottom(), r->used()); | |
3769 } | |
3770 } | |
3771 } | |
3772 #endif // PRODUCT | |
342 | 3773 |
3774 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3775 _drain_in_progress = false; | |
3776 set_evac_failure_closure(cl); | |
3777 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3778 } | |
3779 | |
3780 void G1CollectedHeap::finalize_for_evac_failure() { | |
3781 assert(_evac_failure_scan_stack != NULL && | |
3782 _evac_failure_scan_stack->length() == 0, | |
3783 "Postcondition"); | |
3784 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3785 delete _evac_failure_scan_stack; |
342 | 3786 _evac_failure_scan_stack = NULL; |
3787 } | |
3788 | |
3789 | |
3790 | |
3791 // *** Sequential G1 Evacuation | |
3792 | |
3793 class G1IsAliveClosure: public BoolObjectClosure { | |
3794 G1CollectedHeap* _g1; | |
3795 public: | |
3796 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3797 void do_object(oop p) { assert(false, "Do not call."); } | |
3798 bool do_object_b(oop p) { | |
3799 // It is reachable if it is outside the collection set, or is inside | |
3800 // and forwarded. | |
3801 | |
3802 #ifdef G1_DEBUG | |
3803 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3804 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3805 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3806 #endif // G1_DEBUG | |
3807 | |
3808 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3809 } | |
3810 }; | |
3811 | |
3812 class G1KeepAliveClosure: public OopClosure { | |
3813 G1CollectedHeap* _g1; | |
3814 public: | |
3815 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3816 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3817 void do_oop( oop* p) { |
342 | 3818 oop obj = *p; |
3819 #ifdef G1_DEBUG | |
3820 if (PrintGC && Verbose) { | |
3821 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3822 p, (void*) obj, (void*) *p); | |
3823 } | |
3824 #endif // G1_DEBUG | |
3825 | |
3826 if (_g1->obj_in_cs(obj)) { | |
3827 assert( obj->is_forwarded(), "invariant" ); | |
3828 *p = obj->forwardee(); | |
3829 #ifdef G1_DEBUG | |
3830 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3831 (void*) obj, (void*) *p); | |
3832 #endif // G1_DEBUG | |
3833 } | |
3834 } | |
3835 }; | |
3836 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3837 class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3838 private: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3839 G1CollectedHeap* _g1; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3840 DirtyCardQueue *_dcq; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3841 CardTableModRefBS* _ct_bs; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3842 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3843 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3844 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3845 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3846 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3847 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3848 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3849 template <class T> void do_oop_work(T* p) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3850 assert(_from->is_in_reserved(p), "paranoia"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3851 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3852 !_from->is_survivor()) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3853 size_t card_index = _ct_bs->index_for(p); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3854 if (_ct_bs->mark_card_deferred(card_index)) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3855 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3856 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3857 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3858 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3859 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3860 |
342 | 3861 class RemoveSelfPointerClosure: public ObjectClosure { |
3862 private: | |
3863 G1CollectedHeap* _g1; | |
3864 ConcurrentMark* _cm; | |
3865 HeapRegion* _hr; | |
3866 size_t _prev_marked_bytes; | |
3867 size_t _next_marked_bytes; | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3868 OopsInHeapRegionClosure *_cl; |
342 | 3869 public: |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3870 RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr, |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3871 OopsInHeapRegionClosure* cl) : |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3872 _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3873 _next_marked_bytes(0), _cl(cl) {} |
342 | 3874 |
3875 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3876 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3877 | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3878 // <original comment> |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3879 // The original idea here was to coalesce evacuated and dead objects. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3880 // However that caused complications with the block offset table (BOT). |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3881 // In particular if there were two TLABs, one of them partially refined. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3882 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3883 // The BOT entries of the unrefined part of TLAB_2 point to the start |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3884 // of TLAB_2. If the last object of the TLAB_1 and the first object |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3885 // of TLAB_2 are coalesced, then the cards of the unrefined part |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3886 // would point into middle of the filler object. |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3887 // The current approach is to not coalesce and leave the BOT contents intact. |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3888 // </original comment> |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3889 // |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3890 // We now reset the BOT when we start the object iteration over the |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3891 // region and refine its entries for every object we come across. So |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3892 // the above comment is not really relevant and we should be able |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3893 // to coalesce dead objects if we want to. |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3894 void do_object(oop obj) { |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3895 HeapWord* obj_addr = (HeapWord*) obj; |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3896 assert(_hr->is_in(obj_addr), "sanity"); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3897 size_t obj_size = obj->size(); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3898 _hr->update_bot_for_object(obj_addr, obj_size); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3899 if (obj->is_forwarded() && obj->forwardee() == obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3900 // The object failed to move. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3901 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3902 _cm->markPrev(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3903 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3904 _prev_marked_bytes += (obj_size * HeapWordSize); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3905 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3906 _cm->markAndGrayObjectIfNecessary(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3907 } |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3908 obj->set_mark(markOopDesc::prototype()); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3909 // While we were processing RSet buffers during the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3910 // collection, we actually didn't scan any cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3911 // collection set, since we didn't want to update remebered |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3912 // sets with entries that point into the collection set, given |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3913 // that live objects fromthe collection set are about to move |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3914 // and such entries will be stale very soon. This change also |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3915 // dealt with a reliability issue which involved scanning a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3916 // card in the collection set and coming across an array that |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3917 // was being chunked and looking malformed. The problem is |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3918 // that, if evacuation fails, we might have remembered set |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3919 // entries missing given that we skipped cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3920 // collection set. So, we'll recreate such entries now. |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3921 obj->oop_iterate(_cl); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3922 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3923 } else { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3924 // The object has been either evacuated or is dead. Fill it with a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3925 // dummy object. |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3926 MemRegion mr((HeapWord*)obj, obj_size); |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3927 CollectedHeap::fill_with_object(mr); |
342 | 3928 _cm->clearRangeBothMaps(mr); |
3929 } | |
3930 } | |
3931 }; | |
3932 | |
3933 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
1705 | 3934 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3935 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3936 UpdateRSetDeferred deferred_update(_g1h, &dcq); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3937 OopsInHeapRegionClosure *cl; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3938 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3939 cl = &deferred_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3940 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3941 cl = &immediate_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3942 } |
342 | 3943 HeapRegion* cur = g1_policy()->collection_set(); |
3944 while (cur != NULL) { | |
3945 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3946 assert(!cur->isHumongous(), "sanity"); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3947 |
342 | 3948 if (cur->evacuation_failed()) { |
3949 assert(cur->in_collection_set(), "bad CS"); | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3950 RemoveSelfPointerClosure rspc(_g1h, cur, cl); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3951 |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3952 cur->reset_bot(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3953 cl->set_region(cur); |
342 | 3954 cur->object_iterate(&rspc); |
3955 | |
3956 // A number of manipulations to make the TAMS be the current top, | |
3957 // and the marked bytes be the ones observed in the iteration. | |
3958 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
3959 // The comments below are the postconditions achieved by the | |
3960 // calls. Note especially the last such condition, which says that | |
3961 // the count of marked bytes has been properly restored. | |
3962 cur->note_start_of_marking(false); | |
3963 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3964 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
3965 // _next_marked_bytes == prev_marked_bytes. | |
3966 cur->note_end_of_marking(); | |
3967 // _prev_top_at_mark_start == top(), | |
3968 // _prev_marked_bytes == prev_marked_bytes | |
3969 } | |
3970 // If there is no mark in progress, we modified the _next variables | |
3971 // above needlessly, but harmlessly. | |
3972 if (_g1h->mark_in_progress()) { | |
3973 cur->note_start_of_marking(false); | |
3974 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3975 // _next_marked_bytes == next_marked_bytes. | |
3976 } | |
3977 | |
3978 // Now make sure the region has the right index in the sorted array. | |
3979 g1_policy()->note_change_in_marked_bytes(cur); | |
3980 } | |
3981 cur = cur->next_in_collection_set(); | |
3982 } | |
3983 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3984 | |
3985 // Now restore saved marks, if any. | |
3986 if (_objs_with_preserved_marks != NULL) { | |
3987 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3988 guarantee(_objs_with_preserved_marks->length() == | |
3989 _preserved_marks_of_objs->length(), "Both or none."); | |
3990 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3991 oop obj = _objs_with_preserved_marks->at(i); | |
3992 markOop m = _preserved_marks_of_objs->at(i); | |
3993 obj->set_mark(m); | |
3994 } | |
3995 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3996 delete _objs_with_preserved_marks; | |
3997 delete _preserved_marks_of_objs; | |
3998 _objs_with_preserved_marks = NULL; | |
3999 _preserved_marks_of_objs = NULL; | |
4000 } | |
4001 } | |
4002 | |
4003 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
4004 _evac_failure_scan_stack->push(obj); | |
4005 } | |
4006 | |
4007 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
4008 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
4009 | |
4010 while (_evac_failure_scan_stack->length() > 0) { | |
4011 oop obj = _evac_failure_scan_stack->pop(); | |
4012 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
4013 obj->oop_iterate_backwards(_evac_failure_closure); | |
4014 } | |
4015 } | |
4016 | |
4017 oop | |
4018 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
4019 oop old) { | |
3323
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4020 assert(obj_in_cs(old), |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4021 err_msg("obj: "PTR_FORMAT" should still be in the CSet", |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4022 (HeapWord*) old)); |
342 | 4023 markOop m = old->mark(); |
4024 oop forward_ptr = old->forward_to_atomic(old); | |
4025 if (forward_ptr == NULL) { | |
4026 // Forward-to-self succeeded. | |
4027 if (_evac_failure_closure != cl) { | |
4028 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
4029 assert(!_drain_in_progress, | |
4030 "Should only be true while someone holds the lock."); | |
4031 // Set the global evac-failure closure to the current thread's. | |
4032 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
4033 set_evac_failure_closure(cl); | |
4034 // Now do the common part. | |
4035 handle_evacuation_failure_common(old, m); | |
4036 // Reset to NULL. | |
4037 set_evac_failure_closure(NULL); | |
4038 } else { | |
4039 // The lock is already held, and this is recursive. | |
4040 assert(_drain_in_progress, "This should only be the recursive case."); | |
4041 handle_evacuation_failure_common(old, m); | |
4042 } | |
4043 return old; | |
4044 } else { | |
3323
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4045 // Forward-to-self failed. Either someone else managed to allocate |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4046 // space for this object (old != forward_ptr) or they beat us in |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4047 // self-forwarding it (old == forward_ptr). |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4048 assert(old == forward_ptr || !obj_in_cs(forward_ptr), |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4049 err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" " |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4050 "should not be in the CSet", |
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4051 (HeapWord*) old, (HeapWord*) forward_ptr)); |
342 | 4052 return forward_ptr; |
4053 } | |
4054 } | |
4055 | |
4056 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
4057 set_evacuation_failed(true); | |
4058 | |
4059 preserve_mark_if_necessary(old, m); | |
4060 | |
4061 HeapRegion* r = heap_region_containing(old); | |
4062 if (!r->evacuation_failed()) { | |
4063 r->set_evacuation_failed(true); | |
1282 | 4064 if (G1PrintHeapRegions) { |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
4065 gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " |
342 | 4066 "["PTR_FORMAT","PTR_FORMAT")\n", |
4067 r, r->bottom(), r->end()); | |
4068 } | |
4069 } | |
4070 | |
4071 push_on_evac_failure_scan_stack(old); | |
4072 | |
4073 if (!_drain_in_progress) { | |
4074 // prevent recursion in copy_to_survivor_space() | |
4075 _drain_in_progress = true; | |
4076 drain_evac_failure_scan_stack(); | |
4077 _drain_in_progress = false; | |
4078 } | |
4079 } | |
4080 | |
4081 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
2038
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4082 assert(evacuation_failed(), "Oversaving!"); |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4083 // We want to call the "for_promotion_failure" version only in the |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4084 // case of a promotion failure. |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4085 if (m->must_be_preserved_for_promotion_failure(obj)) { |
342 | 4086 if (_objs_with_preserved_marks == NULL) { |
4087 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
4088 _objs_with_preserved_marks = | |
4089 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
4090 _preserved_marks_of_objs = | |
4091 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
4092 } | |
4093 _objs_with_preserved_marks->push(obj); | |
4094 _preserved_marks_of_objs->push(m); | |
4095 } | |
4096 } | |
4097 | |
4098 // *** Parallel G1 Evacuation | |
4099 | |
4100 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
4101 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4102 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4103 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4104 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4105 |
342 | 4106 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
4107 // let the caller handle alloc failure | |
4108 if (alloc_region == NULL) return NULL; | |
4109 | |
4110 HeapWord* block = alloc_region->par_allocate(word_size); | |
4111 if (block == NULL) { | |
4112 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
4113 } | |
4114 return block; | |
4115 } | |
4116 | |
545 | 4117 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
4118 bool par) { | |
4119 // Another thread might have obtained alloc_region for the given | |
4120 // purpose, and might be attempting to allocate in it, and might | |
4121 // succeed. Therefore, we can't do the "finalization" stuff on the | |
4122 // region below until we're sure the last allocation has happened. | |
4123 // We ensure this by allocating the remaining space with a garbage | |
4124 // object. | |
4125 if (par) par_allocate_remaining_space(alloc_region); | |
4126 // Now we can do the post-GC stuff on the region. | |
4127 alloc_region->note_end_of_copying(); | |
4128 g1_policy()->record_after_bytes(alloc_region->used()); | |
4129 } | |
4130 | |
342 | 4131 HeapWord* |
4132 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
4133 HeapRegion* alloc_region, | |
4134 bool par, | |
4135 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4136 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4137 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4138 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4139 |
2152 | 4140 // We need to make sure we serialize calls to this method. Given |
4141 // that the FreeList_lock guards accesses to the free_list anyway, | |
4142 // and we need to potentially remove a region from it, we'll use it | |
4143 // to protect the whole call. | |
4144 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | |
4145 | |
342 | 4146 HeapWord* block = NULL; |
4147 // In the parallel case, a previous thread to obtain the lock may have | |
4148 // already assigned a new gc_alloc_region. | |
4149 if (alloc_region != _gc_alloc_regions[purpose]) { | |
4150 assert(par, "But should only happen in parallel case."); | |
4151 alloc_region = _gc_alloc_regions[purpose]; | |
4152 if (alloc_region == NULL) return NULL; | |
4153 block = alloc_region->par_allocate(word_size); | |
4154 if (block != NULL) return block; | |
4155 // Otherwise, continue; this new region is empty, too. | |
4156 } | |
4157 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 4158 retire_alloc_region(alloc_region, par); |
342 | 4159 |
4160 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
4161 // Cannot allocate more regions for the given purpose. | |
4162 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
4163 // Is there an alternative? | |
4164 if (purpose != alt_purpose) { | |
4165 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
4166 // Has not the alternative region been aliased? | |
545 | 4167 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 4168 // Try to allocate in the alternative region. |
4169 if (par) { | |
4170 block = alt_region->par_allocate(word_size); | |
4171 } else { | |
4172 block = alt_region->allocate(word_size); | |
4173 } | |
4174 // Make an alias. | |
4175 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 4176 if (block != NULL) { |
4177 return block; | |
4178 } | |
4179 retire_alloc_region(alt_region, par); | |
342 | 4180 } |
4181 // Both the allocation region and the alternative one are full | |
4182 // and aliased, replace them with a new allocation region. | |
4183 purpose = alt_purpose; | |
4184 } else { | |
4185 set_gc_alloc_region(purpose, NULL); | |
4186 return NULL; | |
4187 } | |
4188 } | |
4189 | |
4190 // Now allocate a new region for allocation. | |
2152 | 4191 alloc_region = new_gc_alloc_region(purpose, word_size); |
342 | 4192 |
4193 // let the caller handle alloc failure | |
4194 if (alloc_region != NULL) { | |
4195 | |
4196 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
4197 assert(alloc_region->saved_mark_at_top(), | |
4198 "Mark should have been saved already."); | |
4199 // This must be done last: once it's installed, other regions may | |
4200 // allocate in it (without holding the lock.) | |
4201 set_gc_alloc_region(purpose, alloc_region); | |
4202 | |
4203 if (par) { | |
4204 block = alloc_region->par_allocate(word_size); | |
4205 } else { | |
4206 block = alloc_region->allocate(word_size); | |
4207 } | |
4208 // Caller handles alloc failure. | |
4209 } else { | |
4210 // This sets other apis using the same old alloc region to NULL, also. | |
4211 set_gc_alloc_region(purpose, NULL); | |
4212 } | |
4213 return block; // May be NULL. | |
4214 } | |
4215 | |
4216 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
4217 HeapWord* block = NULL; | |
4218 size_t free_words; | |
4219 do { | |
4220 free_words = r->free()/HeapWordSize; | |
4221 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
4222 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 4223 // Otherwise, try to claim it. |
4224 block = r->par_allocate(free_words); | |
4225 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
4226 fill_with_object(block, free_words); |
342 | 4227 } |
4228 | |
4229 #ifndef PRODUCT | |
4230 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
4231 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
4232 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
4233 return true; | |
4234 } | |
4235 #endif // PRODUCT | |
4236 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4237 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4238 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4239 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4240 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4241 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4242 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4243 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4244 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4245 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4246 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4247 _age_table(false), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4248 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4249 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4250 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4251 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4252 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4253 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4254 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4255 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4256 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4257 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4258 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4259 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4260 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4261 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4262 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4263 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4264 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4265 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4266 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4267 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4268 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4269 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4270 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4271 } |
342 | 4272 |
1709 | 4273 void |
4274 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) | |
4275 { | |
4276 st->print_raw_cr("GC Termination Stats"); | |
4277 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" | |
4278 " ------waste (KiB)------"); | |
4279 st->print_raw_cr("thr ms ms % ms % attempts" | |
4280 " total alloc undo"); | |
4281 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" | |
4282 " ------- ------- -------"); | |
4283 } | |
4284 | |
4285 void | |
4286 G1ParScanThreadState::print_termination_stats(int i, | |
4287 outputStream* const st) const | |
4288 { | |
4289 const double elapsed_ms = elapsed_time() * 1000.0; | |
4290 const double s_roots_ms = strong_roots_time() * 1000.0; | |
4291 const double term_ms = term_time() * 1000.0; | |
4292 st->print_cr("%3d %9.2f %9.2f %6.2f " | |
4293 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " | |
4294 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), | |
4295 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, | |
4296 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), | |
4297 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, | |
4298 alloc_buffer_waste() * HeapWordSize / K, | |
4299 undo_waste() * HeapWordSize / K); | |
4300 } | |
4301 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4302 #ifdef ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4303 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4304 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4305 assert(UseCompressedOops, "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4306 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4307 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4308 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4309 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4310 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4311 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4312 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4313 bool G1ParScanThreadState::verify_ref(oop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4314 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4315 if (has_partial_array_mask(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4316 // Must be in the collection set--it's already been copied. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4317 oop p = clear_partial_array_mask(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4318 assert(_g1h->obj_in_cs(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4319 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4320 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4321 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4322 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4323 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4324 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4325 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4326 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4327 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4328 bool G1ParScanThreadState::verify_task(StarTask ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4329 if (ref.is_narrow()) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4330 return verify_ref((narrowOop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4331 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4332 return verify_ref((oop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4333 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4334 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4335 #endif // ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4336 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4337 void G1ParScanThreadState::trim_queue() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4338 StarTask ref; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4339 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4340 // Drain the overflow stack first, so other threads can steal. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4341 while (refs()->pop_overflow(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4342 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4343 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4344 while (refs()->pop_local(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4345 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4346 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4347 } while (!refs()->is_empty()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4348 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4349 |
342 | 4350 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
4351 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
4352 _par_scan_state(par_scan_state) { } | |
4353 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4354 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 4355 // This is called _after_ do_oop_work has been called, hence after |
4356 // the object has been relocated to its new location and *p points | |
4357 // to its new location. | |
4358 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4359 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4360 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4361 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4362 HeapWord* addr = (HeapWord*)obj; |
3323
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4363 if (_g1->is_in_g1_reserved(addr)) { |
342 | 4364 _cm->grayRoot(oop(addr)); |
3323
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4365 } |
342 | 4366 } |
4367 } | |
4368 | |
4369 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
4370 size_t word_sz = old->size(); | |
4371 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
4372 // +1 to make the -1 indexes valid... | |
4373 int young_index = from_region->young_index_in_cset()+1; | |
4374 assert( (from_region->is_young() && young_index > 0) || | |
4375 (!from_region->is_young() && young_index == 0), "invariant" ); | |
4376 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
4377 markOop m = old->mark(); | |
545 | 4378 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
4379 : m->age(); | |
4380 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 4381 word_sz); |
4382 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
4383 oop obj = oop(obj_ptr); | |
4384 | |
4385 if (obj_ptr == NULL) { | |
4386 // This will either forward-to-self, or detect that someone else has | |
4387 // installed a forwarding pointer. | |
4388 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4389 return _g1->handle_evacuation_failure_par(cl, old); | |
4390 } | |
4391 | |
526 | 4392 // We're going to allocate linearly, so might as well prefetch ahead. |
4393 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
4394 | |
342 | 4395 oop forward_ptr = old->forward_to_atomic(obj); |
4396 if (forward_ptr == NULL) { | |
4397 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 4398 if (g1p->track_object_age(alloc_purpose)) { |
4399 // We could simply do obj->incr_age(). However, this causes a | |
4400 // performance issue. obj->incr_age() will first check whether | |
4401 // the object has a displaced mark by checking its mark word; | |
4402 // getting the mark word from the new location of the object | |
4403 // stalls. So, given that we already have the mark word and we | |
4404 // are about to install it anyway, it's better to increase the | |
4405 // age on the mark word, when the object does not have a | |
4406 // displaced mark word. We're not expecting many objects to have | |
4407 // a displaced marked word, so that case is not optimized | |
4408 // further (it could be...) and we simply call obj->incr_age(). | |
4409 | |
4410 if (m->has_displaced_mark_helper()) { | |
4411 // in this case, we have to install the mark word first, | |
4412 // otherwise obj looks to be forwarded (the old mark word, | |
4413 // which contains the forward pointer, was copied) | |
4414 obj->set_mark(m); | |
4415 obj->incr_age(); | |
4416 } else { | |
4417 m = m->incr_age(); | |
545 | 4418 obj->set_mark(m); |
526 | 4419 } |
545 | 4420 _par_scan_state->age_table()->add(obj, word_sz); |
4421 } else { | |
4422 obj->set_mark(m); | |
526 | 4423 } |
4424 | |
342 | 4425 // preserve "next" mark bit |
4426 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
4427 if (!use_local_bitmaps || | |
4428 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
4429 // if we couldn't mark it on the local bitmap (this happens when | |
4430 // the object was not allocated in the GCLab), we have to bite | |
4431 // the bullet and do the standard parallel mark | |
4432 _cm->markAndGrayObjectIfNecessary(obj); | |
4433 } | |
4434 #if 1 | |
4435 if (_g1->isMarkedNext(old)) { | |
4436 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
4437 } | |
4438 #endif | |
4439 } | |
4440 | |
4441 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
4442 surv_young_words[young_index] += word_sz; | |
4443 | |
4444 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
4445 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4446 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4447 _par_scan_state->push_on_queue(old_p); |
342 | 4448 } else { |
526 | 4449 // No point in using the slower heap_region_containing() method, |
4450 // given that we know obj is in the heap. | |
4451 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 4452 obj->oop_iterate_backwards(_scanner); |
4453 } | |
4454 } else { | |
4455 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
4456 obj = forward_ptr; | |
4457 } | |
4458 return obj; | |
4459 } | |
4460 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4461 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4462 template <class T> |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4463 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4464 ::do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4465 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 4466 assert(barrier != G1BarrierRS || obj != NULL, |
4467 "Precondition: G1BarrierRS implies obj is nonNull"); | |
4468 | |
526 | 4469 // here the null check is implicit in the cset_fast_test() test |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4470 if (_g1->in_cset_fast_test(obj)) { |
342 | 4471 #if G1_REM_SET_LOGGING |
526 | 4472 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
4473 "into CS.", p, (void*) obj); | |
342 | 4474 #endif |
526 | 4475 if (obj->is_forwarded()) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4476 oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
526 | 4477 } else { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4478 oop copy_oop = copy_to_survivor_space(obj); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4479 oopDesc::encode_store_heap_oop(p, copy_oop); |
342 | 4480 } |
526 | 4481 // When scanning the RS, we only care about objs in CS. |
4482 if (barrier == G1BarrierRS) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4483 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 4484 } |
526 | 4485 } |
4486 | |
4487 if (barrier == G1BarrierEvac && obj != NULL) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4488 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 4489 } |
4490 | |
4491 if (do_gen_barrier && obj != NULL) { | |
4492 par_do_barrier(p); | |
4493 } | |
4494 } | |
4495 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4496 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4497 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4498 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4499 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
526 | 4500 assert(has_partial_array_mask(p), "invariant"); |
4501 oop old = clear_partial_array_mask(p); | |
342 | 4502 assert(old->is_objArray(), "must be obj array"); |
4503 assert(old->is_forwarded(), "must be forwarded"); | |
4504 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
4505 | |
4506 objArrayOop obj = objArrayOop(old->forwardee()); | |
4507 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
4508 // Process ParGCArrayScanChunk elements now | |
4509 // and push the remainder back onto queue | |
4510 int start = arrayOop(old)->length(); | |
4511 int end = obj->length(); | |
4512 int remainder = end - start; | |
4513 assert(start <= end, "just checking"); | |
4514 if (remainder > 2 * ParGCArrayScanChunk) { | |
4515 // Test above combines last partial chunk with a full chunk | |
4516 end = start + ParGCArrayScanChunk; | |
4517 arrayOop(old)->set_length(end); | |
4518 // Push remainder. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4519 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4520 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4521 _par_scan_state->push_on_queue(old_p); |
342 | 4522 } else { |
4523 // Restore length so that the heap remains parsable in | |
4524 // case of evacuation failure. | |
4525 arrayOop(old)->set_length(end); | |
4526 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4527 _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
342 | 4528 // process our set of indices (include header in first chunk) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4529 obj->oop_iterate_range(&_scanner, start, end); |
342 | 4530 } |
4531 | |
4532 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
4533 protected: | |
4534 G1CollectedHeap* _g1h; | |
4535 G1ParScanThreadState* _par_scan_state; | |
4536 RefToScanQueueSet* _queues; | |
4537 ParallelTaskTerminator* _terminator; | |
4538 | |
4539 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
4540 RefToScanQueueSet* queues() { return _queues; } | |
4541 ParallelTaskTerminator* terminator() { return _terminator; } | |
4542 | |
4543 public: | |
4544 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
4545 G1ParScanThreadState* par_scan_state, | |
4546 RefToScanQueueSet* queues, | |
4547 ParallelTaskTerminator* terminator) | |
4548 : _g1h(g1h), _par_scan_state(par_scan_state), | |
4549 _queues(queues), _terminator(terminator) {} | |
4550 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4551 void do_void(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4552 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4553 private: |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4554 inline bool offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4555 }; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4556 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4557 bool G1ParEvacuateFollowersClosure::offer_termination() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4558 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4559 pss->start_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4560 const bool res = terminator()->offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4561 pss->end_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4562 return res; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4563 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4564 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4565 void G1ParEvacuateFollowersClosure::do_void() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4566 StarTask stolen_task; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4567 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4568 pss->trim_queue(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4569 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4570 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4571 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4572 assert(pss->verify_task(stolen_task), "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4573 if (stolen_task.is_narrow()) { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4574 pss->deal_with_reference((narrowOop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4575 } else { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4576 pss->deal_with_reference((oop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4577 } |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4578 |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4579 // We've just processed a reference and we might have made |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4580 // available new entries on the queues. So we have to make sure |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4581 // we drain the queues as necessary. |
342 | 4582 pss->trim_queue(); |
4583 } | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4584 } while (!offer_termination()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4585 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4586 pss->retire_alloc_buffers(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4587 } |
342 | 4588 |
4589 class G1ParTask : public AbstractGangTask { | |
4590 protected: | |
4591 G1CollectedHeap* _g1h; | |
4592 RefToScanQueueSet *_queues; | |
4593 ParallelTaskTerminator _terminator; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4594 int _n_workers; |
342 | 4595 |
4596 Mutex _stats_lock; | |
4597 Mutex* stats_lock() { return &_stats_lock; } | |
4598 | |
4599 size_t getNCards() { | |
4600 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
4601 / G1BlockOffsetSharedArray::N_bytes; | |
4602 } | |
4603 | |
4604 public: | |
4605 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
4606 : AbstractGangTask("G1 collection"), | |
4607 _g1h(g1h), | |
4608 _queues(task_queues), | |
4609 _terminator(workers, _queues), | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4610 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4611 _n_workers(workers) |
342 | 4612 {} |
4613 | |
4614 RefToScanQueueSet* queues() { return _queues; } | |
4615 | |
4616 RefToScanQueue *work_queue(int i) { | |
4617 return queues()->queue(i); | |
4618 } | |
4619 | |
4620 void work(int i) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4621 if (i >= _n_workers) return; // no work needed this round |
1611 | 4622 |
4623 double start_time_ms = os::elapsedTime() * 1000.0; | |
4624 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); | |
4625 | |
342 | 4626 ResourceMark rm; |
4627 HandleMark hm; | |
4628 | |
526 | 4629 G1ParScanThreadState pss(_g1h, i); |
4630 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
4631 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
4632 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 4633 |
4634 pss.set_evac_closure(&scan_evac_cl); | |
4635 pss.set_evac_failure_closure(&evac_failure_cl); | |
4636 pss.set_partial_scan_closure(&partial_scan_cl); | |
4637 | |
4638 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
4639 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
4640 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4641 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4642 |
342 | 4643 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
4644 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
4645 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
4646 | |
4647 OopsInHeapRegionClosure *scan_root_cl; | |
4648 OopsInHeapRegionClosure *scan_perm_cl; | |
4649 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
4650 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
342 | 4651 scan_root_cl = &scan_mark_root_cl; |
4652 scan_perm_cl = &scan_mark_perm_cl; | |
4653 } else { | |
4654 scan_root_cl = &only_scan_root_cl; | |
4655 scan_perm_cl = &only_scan_perm_cl; | |
4656 } | |
4657 | |
4658 pss.start_strong_roots(); | |
4659 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4660 SharedHeap::SO_AllClasses, | |
4661 scan_root_cl, | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4662 &push_heap_rs_cl, |
342 | 4663 scan_perm_cl, |
4664 i); | |
4665 pss.end_strong_roots(); | |
4666 { | |
4667 double start = os::elapsedTime(); | |
4668 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4669 evac.do_void(); | |
4670 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4671 double term_ms = pss.term_time()*1000.0; | |
4672 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
1611 | 4673 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); |
342 | 4674 } |
1282 | 4675 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
342 | 4676 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
4677 | |
4678 // Clean up any par-expanded rem sets. | |
4679 HeapRegionRemSet::par_cleanup(); | |
4680 | |
4681 if (ParallelGCVerbose) { | |
1709 | 4682 MutexLocker x(stats_lock()); |
4683 pss.print_termination_stats(i); | |
342 | 4684 } |
4685 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4686 assert(pss.refs()->is_empty(), "should be empty"); |
1611 | 4687 double end_time_ms = os::elapsedTime() * 1000.0; |
4688 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); | |
342 | 4689 } |
4690 }; | |
4691 | |
4692 // *** Common G1 Evacuation Stuff | |
4693 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4694 // This method is run in a GC worker. |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4695 |
342 | 4696 void |
4697 G1CollectedHeap:: | |
4698 g1_process_strong_roots(bool collecting_perm_gen, | |
4699 SharedHeap::ScanningOption so, | |
4700 OopClosure* scan_non_heap_roots, | |
4701 OopsInHeapRegionClosure* scan_rs, | |
4702 OopsInGenClosure* scan_perm, | |
4703 int worker_i) { | |
4704 // First scan the strong roots, including the perm gen. | |
4705 double ext_roots_start = os::elapsedTime(); | |
4706 double closure_app_time_sec = 0.0; | |
4707 | |
4708 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4709 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4710 buf_scan_perm.set_generation(perm_gen()); | |
4711 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4712 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4713 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4714 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4715 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4716 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4717 collecting_perm_gen, so, |
342 | 4718 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4719 &eager_scan_code_roots, |
342 | 4720 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4721 |
342 | 4722 // Finish up any enqueued closure apps. |
4723 buf_scan_non_heap_roots.done(); | |
4724 buf_scan_perm.done(); | |
4725 double ext_roots_end = os::elapsedTime(); | |
4726 g1_policy()->reset_obj_copy_time(worker_i); | |
4727 double obj_copy_time_sec = | |
4728 buf_scan_non_heap_roots.closure_app_seconds() + | |
4729 buf_scan_perm.closure_app_seconds(); | |
4730 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4731 double ext_root_time_ms = | |
4732 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4733 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4734 | |
4735 // Scan strong roots in mark stack. | |
4736 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4737 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4738 } | |
4739 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4740 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4741 | |
4742 // XXX What should this be doing in the parallel case? | |
4743 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4744 // Now scan the complement of the collection set. | |
4745 if (scan_rs != NULL) { | |
4746 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4747 } | |
4748 // Finish with the ref_processor roots. | |
4749 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4750 // We need to treat the discovered reference lists as roots and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4751 // keep entries (which are added by the marking threads) on them |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4752 // live until they can be processed at the end of marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4753 ref_processor()->weak_oops_do(scan_non_heap_roots); |
342 | 4754 ref_processor()->oops_do(scan_non_heap_roots); |
4755 } | |
4756 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4757 _process_strong_tasks->all_tasks_completed(); | |
4758 } | |
4759 | |
4760 void | |
4761 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4762 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4763 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4764 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4765 } |
4766 | |
4767 | |
4768 class SaveMarksClosure: public HeapRegionClosure { | |
4769 public: | |
4770 bool doHeapRegion(HeapRegion* r) { | |
4771 r->save_marks(); | |
4772 return false; | |
4773 } | |
4774 }; | |
4775 | |
4776 void G1CollectedHeap::save_marks() { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4777 if (!CollectedHeap::use_parallel_gc_threads()) { |
342 | 4778 SaveMarksClosure sm; |
4779 heap_region_iterate(&sm); | |
4780 } | |
4781 // We do this even in the parallel case | |
4782 perm_gen()->save_marks(); | |
4783 } | |
4784 | |
4785 void G1CollectedHeap::evacuate_collection_set() { | |
4786 set_evacuation_failed(false); | |
4787 | |
4788 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4789 concurrent_g1_refine()->set_use_cache(false); | |
889 | 4790 concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
4791 | |
342 | 4792 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
4793 set_par_threads(n_workers); | |
4794 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4795 | |
4796 init_for_evac_failure(NULL); | |
4797 | |
4798 rem_set()->prepare_for_younger_refs_iterate(true); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4799 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4800 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
342 | 4801 double start_par = os::elapsedTime(); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4802 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 4803 // The individual threads will set their evac-failure closures. |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4804 StrongRootsScope srs(this); |
1709 | 4805 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); |
342 | 4806 workers()->run_task(&g1_par_task); |
4807 } else { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4808 StrongRootsScope srs(this); |
342 | 4809 g1_par_task.work(0); |
4810 } | |
4811 | |
4812 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4813 g1_policy()->record_par_time(par_time); | |
4814 set_par_threads(0); | |
4815 // Is this the right thing to do here? We don't save marks | |
4816 // on individual heap regions when we allocate from | |
4817 // them in parallel, so this seems like the correct place for this. | |
545 | 4818 retire_all_alloc_regions(); |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4819 |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4820 // Weak root processing. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4821 // Note: when JSR 292 is enabled and code blobs can contain |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4822 // non-perm oops then we will need to process the code blobs |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4823 // here too. |
342 | 4824 { |
4825 G1IsAliveClosure is_alive(this); | |
4826 G1KeepAliveClosure keep_alive(this); | |
4827 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4828 } | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4829 release_gc_alloc_regions(false /* totally */); |
342 | 4830 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4831 |
889 | 4832 concurrent_g1_refine()->clear_hot_cache(); |
342 | 4833 concurrent_g1_refine()->set_use_cache(true); |
4834 | |
4835 finalize_for_evac_failure(); | |
4836 | |
4837 // Must do this before removing self-forwarding pointers, which clears | |
4838 // the per-region evac-failure flags. | |
4839 concurrent_mark()->complete_marking_in_collection_set(); | |
4840 | |
4841 if (evacuation_failed()) { | |
4842 remove_self_forwarding_pointers(); | |
4843 if (PrintGCDetails) { | |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
4844 gclog_or_tty->print(" (to-space overflow)"); |
342 | 4845 } else if (PrintGC) { |
4846 gclog_or_tty->print("--"); | |
4847 } | |
4848 } | |
4849 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4850 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4851 RedirtyLoggedCardTableEntryFastClosure redirty; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4852 dirty_card_queue_set().set_closure(&redirty); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4853 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
1111 | 4854 |
4855 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); | |
4856 dcq.merge_bufferlists(&dirty_card_queue_set()); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4857 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4858 } |
342 | 4859 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
4860 } | |
4861 | |
2173 | 4862 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr, |
2152 | 4863 size_t* pre_used, |
4864 FreeRegionList* free_list, | |
4865 HumongousRegionSet* humongous_proxy_set, | |
2173 | 4866 HRRSCleanupTask* hrrs_cleanup_task, |
2152 | 4867 bool par) { |
4868 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { | |
4869 if (hr->isHumongous()) { | |
4870 assert(hr->startsHumongous(), "we should only see starts humongous"); | |
4871 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par); | |
4872 } else { | |
4873 free_region(hr, pre_used, free_list, par); | |
342 | 4874 } |
2173 | 4875 } else { |
4876 hr->rem_set()->do_cleanup_work(hrrs_cleanup_task); | |
342 | 4877 } |
4878 } | |
4879 | |
2152 | 4880 void G1CollectedHeap::free_region(HeapRegion* hr, |
4881 size_t* pre_used, | |
4882 FreeRegionList* free_list, | |
4883 bool par) { | |
4884 assert(!hr->isHumongous(), "this is only for non-humongous regions"); | |
4885 assert(!hr->is_empty(), "the region should not be empty"); | |
4886 assert(free_list != NULL, "pre-condition"); | |
4887 | |
4888 *pre_used += hr->used(); | |
4889 hr->hr_clear(par, true /* clear_space */); | |
2432
455328d90876
7029458: G1: Add newly-reclaimed regions to the beginning of the region free list, not the end
tonyp
parents:
2369
diff
changeset
|
4890 free_list->add_as_head(hr); |
2152 | 4891 } |
4892 | |
4893 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, | |
4894 size_t* pre_used, | |
4895 FreeRegionList* free_list, | |
4896 HumongousRegionSet* humongous_proxy_set, | |
4897 bool par) { | |
4898 assert(hr->startsHumongous(), "this is only for starts humongous regions"); | |
4899 assert(free_list != NULL, "pre-condition"); | |
4900 assert(humongous_proxy_set != NULL, "pre-condition"); | |
4901 | |
4902 size_t hr_used = hr->used(); | |
4903 size_t hr_capacity = hr->capacity(); | |
4904 size_t hr_pre_used = 0; | |
4905 _humongous_set.remove_with_proxy(hr, humongous_proxy_set); | |
4906 hr->set_notHumongous(); | |
4907 free_region(hr, &hr_pre_used, free_list, par); | |
4908 | |
4909 int i = hr->hrs_index() + 1; | |
4910 size_t num = 1; | |
4911 while ((size_t) i < n_regions()) { | |
4912 HeapRegion* curr_hr = _hrs->at(i); | |
4913 if (!curr_hr->continuesHumongous()) { | |
4914 break; | |
4915 } | |
4916 curr_hr->set_notHumongous(); | |
4917 free_region(curr_hr, &hr_pre_used, free_list, par); | |
4918 num += 1; | |
4919 i += 1; | |
4920 } | |
4921 assert(hr_pre_used == hr_used, | |
4922 err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" " | |
4923 "should be the same", hr_pre_used, hr_used)); | |
4924 *pre_used += hr_pre_used; | |
4925 } | |
4926 | |
4927 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used, | |
4928 FreeRegionList* free_list, | |
4929 HumongousRegionSet* humongous_proxy_set, | |
4930 bool par) { | |
4931 if (pre_used > 0) { | |
4932 Mutex* lock = (par) ? ParGCRareEvent_lock : NULL; | |
342 | 4933 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); |
2152 | 4934 assert(_summary_bytes_used >= pre_used, |
4935 err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" " | |
4936 "should be >= pre_used: "SIZE_FORMAT, | |
4937 _summary_bytes_used, pre_used)); | |
342 | 4938 _summary_bytes_used -= pre_used; |
2152 | 4939 } |
4940 if (free_list != NULL && !free_list->is_empty()) { | |
4941 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | |
2432
455328d90876
7029458: G1: Add newly-reclaimed regions to the beginning of the region free list, not the end
tonyp
parents:
2369
diff
changeset
|
4942 _free_list.add_as_head(free_list); |
2152 | 4943 } |
4944 if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) { | |
4945 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); | |
4946 _humongous_set.update_from_proxy(humongous_proxy_set); | |
342 | 4947 } |
4948 } | |
4949 | |
4950 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4951 while (list != NULL) { | |
4952 guarantee( list->is_young(), "invariant" ); | |
4953 | |
4954 HeapWord* bottom = list->bottom(); | |
4955 HeapWord* end = list->end(); | |
4956 MemRegion mr(bottom, end); | |
4957 ct_bs->dirty(mr); | |
4958 | |
4959 list = list->get_next_young_region(); | |
4960 } | |
4961 } | |
4962 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4963 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4964 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4965 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4966 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4967 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4968 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4969 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4970 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4971 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4972 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4973 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4974 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4975 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4976 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4977 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4978 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4979 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4980 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4981 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4982 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4983 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4984 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4985 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4986 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4987 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4988 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4989 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4990 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4991 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4992 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4993 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4994 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4995 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4996 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4997 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4998 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4999 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5000 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5001 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5002 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5003 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5004 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5005 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5006 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5007 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5008 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5009 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5010 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5011 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5012 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5013 class G1VerifyCardTableCleanup: public HeapRegionClosure { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5014 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5015 CardTableModRefBS* _ct_bs; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5016 public: |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5017 G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs) |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5018 : _g1h(g1h), _ct_bs(ct_bs) { } |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5019 virtual bool doHeapRegion(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5020 if (r->is_survivor()) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5021 _g1h->verify_dirty_region(r); |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5022 } else { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5023 _g1h->verify_not_dirty_region(r); |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5024 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5025 return false; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5026 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5027 }; |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5028 |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5029 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) { |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5030 // All of the region should be clean. |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5031 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5032 MemRegion mr(hr->bottom(), hr->end()); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5033 ct_bs->verify_not_dirty_region(mr); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5034 } |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5035 |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5036 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) { |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5037 // We cannot guarantee that [bottom(),end()] is dirty. Threads |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5038 // dirty allocated blocks as they allocate them. The thread that |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5039 // retires each region and replaces it with a new one will do a |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5040 // maximal allocation to fill in [pre_dummy_top(),end()] but will |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5041 // not dirty that area (one less thing to have to do while holding |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5042 // a lock). So we can only verify that [bottom(),pre_dummy_top()] |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5043 // is dirty. |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5044 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5045 MemRegion mr(hr->bottom(), hr->pre_dummy_top()); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5046 ct_bs->verify_dirty_region(mr); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5047 } |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5048 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5049 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5050 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5051 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5052 verify_dirty_region(hr); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5053 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5054 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5055 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5056 void G1CollectedHeap::verify_dirty_young_regions() { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5057 verify_dirty_young_list(_young_list->first_region()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5058 verify_dirty_young_list(_young_list->first_survivor_region()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5059 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5060 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5061 |
342 | 5062 void G1CollectedHeap::cleanUpCardTable() { |
5063 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
5064 double start = os::elapsedTime(); | |
5065 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5066 // Iterate over the dirty cards region list. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5067 G1ParCleanupCTTask cleanup_task(ct_bs, this, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5068 _young_list->first_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5069 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5070 if (ParallelGCThreads > 0) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5071 set_par_threads(workers()->total_workers()); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5072 workers()->run_task(&cleanup_task); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5073 set_par_threads(0); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5074 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5075 while (_dirty_cards_region_list) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5076 HeapRegion* r = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5077 cleanup_task.clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5078 _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5079 if (_dirty_cards_region_list == r) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5080 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5081 _dirty_cards_region_list = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5082 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5083 r->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5084 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5085 // now, redirty the cards of the survivor regions |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5086 // (it seemed faster to do it this way, instead of iterating over |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5087 // all regions and then clearing / dirtying as appropriate) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5088 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5089 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5090 |
342 | 5091 double elapsed = os::elapsedTime() - start; |
5092 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5093 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5094 if (G1VerifyCTCleanup || VerifyAfterGC) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5095 G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs); |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5096 heap_region_iterate(&cleanup_verifier); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5097 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5098 #endif |
342 | 5099 } |
5100 | |
5101 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
2152 | 5102 size_t pre_used = 0; |
5103 FreeRegionList local_free_list("Local List for CSet Freeing"); | |
5104 | |
342 | 5105 double young_time_ms = 0.0; |
5106 double non_young_time_ms = 0.0; | |
5107 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5108 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5109 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5110 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5111 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5112 |
342 | 5113 G1CollectorPolicy* policy = g1_policy(); |
5114 | |
5115 double start_sec = os::elapsedTime(); | |
5116 bool non_young = true; | |
5117 | |
5118 HeapRegion* cur = cs_head; | |
5119 int age_bound = -1; | |
5120 size_t rs_lengths = 0; | |
5121 | |
5122 while (cur != NULL) { | |
2361 | 5123 assert(!is_on_master_free_list(cur), "sanity"); |
2152 | 5124 |
342 | 5125 if (non_young) { |
5126 if (cur->is_young()) { | |
5127 double end_sec = os::elapsedTime(); | |
5128 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5129 non_young_time_ms += elapsed_ms; | |
5130 | |
5131 start_sec = os::elapsedTime(); | |
5132 non_young = false; | |
5133 } | |
5134 } else { | |
2152 | 5135 double end_sec = os::elapsedTime(); |
5136 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5137 young_time_ms += elapsed_ms; | |
5138 | |
5139 start_sec = os::elapsedTime(); | |
5140 non_young = true; | |
342 | 5141 } |
5142 | |
5143 rs_lengths += cur->rem_set()->occupied(); | |
5144 | |
5145 HeapRegion* next = cur->next_in_collection_set(); | |
5146 assert(cur->in_collection_set(), "bad CS"); | |
5147 cur->set_next_in_collection_set(NULL); | |
5148 cur->set_in_collection_set(false); | |
5149 | |
5150 if (cur->is_young()) { | |
5151 int index = cur->young_index_in_cset(); | |
5152 guarantee( index != -1, "invariant" ); | |
5153 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
5154 size_t words_survived = _surviving_young_words[index]; | |
5155 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5156 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5157 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5158 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5159 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5160 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5161 cur->set_next_young_region(NULL); |
342 | 5162 } else { |
5163 int index = cur->young_index_in_cset(); | |
5164 guarantee( index == -1, "invariant" ); | |
5165 } | |
5166 | |
5167 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
5168 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
5169 "invariant" ); | |
5170 | |
5171 if (!cur->evacuation_failed()) { | |
5172 // And the region is empty. | |
2152 | 5173 assert(!cur->is_empty(), "Should not have empty regions in a CS."); |
5174 free_region(cur, &pre_used, &local_free_list, false /* par */); | |
342 | 5175 } else { |
5176 cur->uninstall_surv_rate_group(); | |
5177 if (cur->is_young()) | |
5178 cur->set_young_index_in_cset(-1); | |
5179 cur->set_not_young(); | |
5180 cur->set_evacuation_failed(false); | |
5181 } | |
5182 cur = next; | |
5183 } | |
5184 | |
5185 policy->record_max_rs_lengths(rs_lengths); | |
5186 policy->cset_regions_freed(); | |
5187 | |
5188 double end_sec = os::elapsedTime(); | |
5189 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5190 if (non_young) | |
5191 non_young_time_ms += elapsed_ms; | |
5192 else | |
5193 young_time_ms += elapsed_ms; | |
5194 | |
2152 | 5195 update_sets_after_freeing_regions(pre_used, &local_free_list, |
5196 NULL /* humongous_proxy_set */, | |
5197 false /* par */); | |
342 | 5198 policy->record_young_free_cset_time_ms(young_time_ms); |
5199 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
5200 } | |
5201 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5202 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5203 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5204 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5205 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5206 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5207 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5208 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5209 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5210 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5211 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5212 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5213 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5214 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5215 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5216 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5217 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5218 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5219 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5220 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5221 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5222 |
2152 | 5223 void G1CollectedHeap::set_free_regions_coming() { |
5224 if (G1ConcRegionFreeingVerbose) { | |
5225 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : " | |
5226 "setting free regions coming"); | |
5227 } | |
5228 | |
5229 assert(!free_regions_coming(), "pre-condition"); | |
5230 _free_regions_coming = true; | |
342 | 5231 } |
5232 | |
2152 | 5233 void G1CollectedHeap::reset_free_regions_coming() { |
5234 { | |
5235 assert(free_regions_coming(), "pre-condition"); | |
5236 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | |
5237 _free_regions_coming = false; | |
5238 SecondaryFreeList_lock->notify_all(); | |
5239 } | |
5240 | |
5241 if (G1ConcRegionFreeingVerbose) { | |
5242 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : " | |
5243 "reset free regions coming"); | |
342 | 5244 } |
5245 } | |
5246 | |
2152 | 5247 void G1CollectedHeap::wait_while_free_regions_coming() { |
5248 // Most of the time we won't have to wait, so let's do a quick test | |
5249 // first before we take the lock. | |
5250 if (!free_regions_coming()) { | |
5251 return; | |
5252 } | |
5253 | |
5254 if (G1ConcRegionFreeingVerbose) { | |
5255 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " | |
5256 "waiting for free regions"); | |
342 | 5257 } |
5258 | |
5259 { | |
2152 | 5260 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
5261 while (free_regions_coming()) { | |
5262 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); | |
342 | 5263 } |
2152 | 5264 } |
5265 | |
5266 if (G1ConcRegionFreeingVerbose) { | |
5267 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " | |
5268 "done waiting for free regions"); | |
5269 } | |
342 | 5270 } |
5271 | |
5272 size_t G1CollectedHeap::n_regions() { | |
5273 return _hrs->length(); | |
5274 } | |
5275 | |
5276 size_t G1CollectedHeap::max_regions() { | |
5277 return | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
5278 (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) / |
342 | 5279 HeapRegion::GrainBytes; |
5280 } | |
5281 | |
5282 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
5283 assert(heap_lock_held_for_gc(), | |
5284 "the heap lock should already be held by or for this thread"); | |
5285 _young_list->push_region(hr); | |
5286 g1_policy()->set_region_short_lived(hr); | |
5287 } | |
5288 | |
5289 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5290 private: | |
5291 bool _success; | |
5292 public: | |
5293 NoYoungRegionsClosure() : _success(true) { } | |
5294 bool doHeapRegion(HeapRegion* r) { | |
5295 if (r->is_young()) { | |
5296 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5297 r->bottom(), r->end()); | |
5298 _success = false; | |
5299 } | |
5300 return false; | |
5301 } | |
5302 bool success() { return _success; } | |
5303 }; | |
5304 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5305 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5306 bool ret = _young_list->check_list_empty(check_sample); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5307 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5308 if (check_heap) { |
342 | 5309 NoYoungRegionsClosure closure; |
5310 heap_region_iterate(&closure); | |
5311 ret = ret && closure.success(); | |
5312 } | |
5313 | |
5314 return ret; | |
5315 } | |
5316 | |
5317 void G1CollectedHeap::empty_young_list() { | |
5318 assert(heap_lock_held_for_gc(), | |
5319 "the heap lock should already be held by or for this thread"); | |
5320 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
5321 | |
5322 _young_list->empty_list(); | |
5323 } | |
5324 | |
5325 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5326 bool no_allocs = true; | |
5327 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5328 HeapRegion* r = _gc_alloc_regions[ap]; | |
5329 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5330 } | |
5331 return no_allocs; | |
5332 } | |
5333 | |
545 | 5334 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5335 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5336 HeapRegion* r = _gc_alloc_regions[ap]; | |
5337 if (r != NULL) { | |
5338 // Check for aliases. | |
5339 bool has_processed_alias = false; | |
5340 for (int i = 0; i < ap; ++i) { | |
5341 if (_gc_alloc_regions[i] == r) { | |
5342 has_processed_alias = true; | |
5343 break; | |
5344 } | |
5345 } | |
5346 if (!has_processed_alias) { | |
545 | 5347 retire_alloc_region(r, false /* par */); |
342 | 5348 } |
5349 } | |
5350 } | |
5351 } | |
5352 | |
5353 // Done at the start of full GC. | |
5354 void G1CollectedHeap::tear_down_region_lists() { | |
2152 | 5355 _free_list.remove_all(); |
342 | 5356 } |
5357 | |
5358 class RegionResetter: public HeapRegionClosure { | |
2152 | 5359 G1CollectedHeap* _g1h; |
5360 FreeRegionList _local_free_list; | |
5361 | |
342 | 5362 public: |
2152 | 5363 RegionResetter() : _g1h(G1CollectedHeap::heap()), |
5364 _local_free_list("Local Free List for RegionResetter") { } | |
5365 | |
342 | 5366 bool doHeapRegion(HeapRegion* r) { |
5367 if (r->continuesHumongous()) return false; | |
5368 if (r->top() > r->bottom()) { | |
5369 if (r->top() < r->end()) { | |
5370 Copy::fill_to_words(r->top(), | |
5371 pointer_delta(r->end(), r->top())); | |
5372 } | |
5373 } else { | |
5374 assert(r->is_empty(), "tautology"); | |
2152 | 5375 _local_free_list.add_as_tail(r); |
342 | 5376 } |
5377 return false; | |
5378 } | |
5379 | |
2152 | 5380 void update_free_lists() { |
5381 _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL, | |
5382 false /* par */); | |
5383 } | |
342 | 5384 }; |
5385 | |
5386 // Done at the end of full GC. | |
5387 void G1CollectedHeap::rebuild_region_lists() { | |
5388 // This needs to go at the end of the full GC. | |
5389 RegionResetter rs; | |
5390 heap_region_iterate(&rs); | |
2152 | 5391 rs.update_free_lists(); |
342 | 5392 } |
5393 | |
5394 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
5395 _refine_cte_cl->set_concurrent(concurrent); | |
5396 } | |
5397 | |
5398 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5399 HeapRegion* hr = heap_region_containing(p); | |
5400 if (hr == NULL) { | |
5401 return is_in_permanent(p); | |
5402 } else { | |
5403 return hr->is_in(p); | |
5404 } | |
5405 } | |
2152 | 5406 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5407 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5408 bool force) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5409 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5410 assert(!force || g1_policy()->can_expand_young_list(), |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5411 "if force is true we should be able to expand the young list"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5412 if (force || !g1_policy()->is_young_list_full()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5413 HeapRegion* new_alloc_region = new_region(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5414 false /* do_expand */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5415 if (new_alloc_region != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5416 g1_policy()->update_region_num(true /* next_is_young */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5417 set_region_short_lived_locked(new_alloc_region); |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
5418 g1mm()->update_eden_counters(); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5419 return new_alloc_region; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5420 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5421 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5422 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5423 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5424 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5425 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5426 size_t allocated_bytes) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5427 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5428 assert(alloc_region->is_young(), "all mutator alloc regions should be young"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5429 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5430 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5431 _summary_bytes_used += allocated_bytes; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5432 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5433 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5434 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5435 bool force) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5436 return _g1h->new_mutator_alloc_region(word_size, force); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5437 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5438 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5439 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5440 size_t allocated_bytes) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5441 _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5442 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5443 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5444 // Heap region set verification |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5445 |
2152 | 5446 class VerifyRegionListsClosure : public HeapRegionClosure { |
5447 private: | |
5448 HumongousRegionSet* _humongous_set; | |
5449 FreeRegionList* _free_list; | |
5450 size_t _region_count; | |
5451 | |
5452 public: | |
5453 VerifyRegionListsClosure(HumongousRegionSet* humongous_set, | |
5454 FreeRegionList* free_list) : | |
5455 _humongous_set(humongous_set), _free_list(free_list), | |
5456 _region_count(0) { } | |
5457 | |
5458 size_t region_count() { return _region_count; } | |
5459 | |
5460 bool doHeapRegion(HeapRegion* hr) { | |
5461 _region_count += 1; | |
5462 | |
5463 if (hr->continuesHumongous()) { | |
5464 return false; | |
5465 } | |
5466 | |
5467 if (hr->is_young()) { | |
5468 // TODO | |
5469 } else if (hr->startsHumongous()) { | |
5470 _humongous_set->verify_next_region(hr); | |
5471 } else if (hr->is_empty()) { | |
5472 _free_list->verify_next_region(hr); | |
5473 } | |
5474 return false; | |
5475 } | |
5476 }; | |
5477 | |
5478 void G1CollectedHeap::verify_region_sets() { | |
5479 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); | |
5480 | |
5481 // First, check the explicit lists. | |
5482 _free_list.verify(); | |
5483 { | |
5484 // Given that a concurrent operation might be adding regions to | |
5485 // the secondary free list we have to take the lock before | |
5486 // verifying it. | |
5487 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | |
5488 _secondary_free_list.verify(); | |
5489 } | |
5490 _humongous_set.verify(); | |
5491 | |
5492 // If a concurrent region freeing operation is in progress it will | |
5493 // be difficult to correctly attributed any free regions we come | |
5494 // across to the correct free list given that they might belong to | |
5495 // one of several (free_list, secondary_free_list, any local lists, | |
5496 // etc.). So, if that's the case we will skip the rest of the | |
5497 // verification operation. Alternatively, waiting for the concurrent | |
5498 // operation to complete will have a non-trivial effect on the GC's | |
5499 // operation (no concurrent operation will last longer than the | |
5500 // interval between two calls to verification) and it might hide | |
5501 // any issues that we would like to catch during testing. | |
5502 if (free_regions_coming()) { | |
5503 return; | |
5504 } | |
5505 | |
2361 | 5506 // Make sure we append the secondary_free_list on the free_list so |
5507 // that all free regions we will come across can be safely | |
5508 // attributed to the free_list. | |
5509 append_secondary_free_list_if_not_empty_with_lock(); | |
2152 | 5510 |
5511 // Finally, make sure that the region accounting in the lists is | |
5512 // consistent with what we see in the heap. | |
5513 _humongous_set.verify_start(); | |
5514 _free_list.verify_start(); | |
5515 | |
5516 VerifyRegionListsClosure cl(&_humongous_set, &_free_list); | |
5517 heap_region_iterate(&cl); | |
5518 | |
5519 _humongous_set.verify_end(); | |
5520 _free_list.verify_end(); | |
342 | 5521 } |