Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 3285:49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
Summary: It introduces non-product cmd line parameter G1DummyRegionsPerGC which indicates how many "dummy" regions to allocate at the end of each GC. This allows the G1 heap to grow artificially and makes concurrent marking cycles more frequent irrespective of what the application that is running is doing. The dummy regions will be found totally empty during cleanup so this parameter can also be used to stress the concurrent cleanup operation.
Reviewed-by: brutisso, johnc
author | tonyp |
---|---|
date | Tue, 19 Apr 2011 15:46:59 -0400 |
parents | abdfc822206f |
children | b52782ae3880 |
rev | line source |
---|---|
342 | 1 /* |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "code/icBuffer.hpp" | |
27 #include "gc_implementation/g1/bufferingOopClosure.hpp" | |
28 #include "gc_implementation/g1/concurrentG1Refine.hpp" | |
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" | |
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" |
1972 | 32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | |
34 #include "gc_implementation/g1/g1MarkSweep.hpp" | |
35 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | |
36 #include "gc_implementation/g1/g1RemSet.inline.hpp" | |
37 #include "gc_implementation/g1/heapRegionRemSet.hpp" | |
38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | |
39 #include "gc_implementation/g1/vm_operations_g1.hpp" | |
40 #include "gc_implementation/shared/isGCActiveMark.hpp" | |
41 #include "memory/gcLocker.inline.hpp" | |
42 #include "memory/genOopClosures.inline.hpp" | |
43 #include "memory/generationSpec.hpp" | |
44 #include "oops/oop.inline.hpp" | |
45 #include "oops/oop.pcgc.inline.hpp" | |
46 #include "runtime/aprofiler.hpp" | |
47 #include "runtime/vmThread.hpp" | |
342 | 48 |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
49 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
50 |
342 | 51 // turn it on so that the contents of the young list (scan-only / |
52 // to-be-collected) are printed at "strategic" points before / during | |
53 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
54 #define YOUNG_LIST_VERBOSE 0 |
342 | 55 // CURRENT STATUS |
56 // This file is under construction. Search for "FIXME". | |
57 | |
58 // INVARIANTS/NOTES | |
59 // | |
60 // All allocation activity covered by the G1CollectedHeap interface is | |
1973 | 61 // serialized by acquiring the HeapLock. This happens in mem_allocate |
62 // and allocate_new_tlab, which are the "entry" points to the | |
63 // allocation code from the rest of the JVM. (Note that this does not | |
64 // apply to TLAB allocation, which is not part of this interface: it | |
65 // is done by clients of this interface.) | |
342 | 66 |
67 // Local to this file. | |
68 | |
69 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
70 SuspendibleThreadSet* _sts; | |
71 G1RemSet* _g1rs; | |
72 ConcurrentG1Refine* _cg1r; | |
73 bool _concurrent; | |
74 public: | |
75 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
76 G1RemSet* g1rs, | |
77 ConcurrentG1Refine* cg1r) : | |
78 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
79 {} | |
80 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
1705 | 81 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); |
82 // This path is executed by the concurrent refine or mutator threads, | |
83 // concurrently, and so we do not care if card_ptr contains references | |
84 // that point into the collection set. | |
85 assert(!oops_into_cset, "should be"); | |
86 | |
342 | 87 if (_concurrent && _sts->should_yield()) { |
88 // Caller will actually yield. | |
89 return false; | |
90 } | |
91 // Otherwise, we finished successfully; return true. | |
92 return true; | |
93 } | |
94 void set_concurrent(bool b) { _concurrent = b; } | |
95 }; | |
96 | |
97 | |
98 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
99 int _calls; | |
100 G1CollectedHeap* _g1h; | |
101 CardTableModRefBS* _ctbs; | |
102 int _histo[256]; | |
103 public: | |
104 ClearLoggedCardTableEntryClosure() : | |
105 _calls(0) | |
106 { | |
107 _g1h = G1CollectedHeap::heap(); | |
108 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
109 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
110 } | |
111 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
112 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
113 _calls++; | |
114 unsigned char* ujb = (unsigned char*)card_ptr; | |
115 int ind = (int)(*ujb); | |
116 _histo[ind]++; | |
117 *card_ptr = -1; | |
118 } | |
119 return true; | |
120 } | |
121 int calls() { return _calls; } | |
122 void print_histo() { | |
123 gclog_or_tty->print_cr("Card table value histogram:"); | |
124 for (int i = 0; i < 256; i++) { | |
125 if (_histo[i] != 0) { | |
126 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
127 } | |
128 } | |
129 } | |
130 }; | |
131 | |
132 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
133 int _calls; | |
134 G1CollectedHeap* _g1h; | |
135 CardTableModRefBS* _ctbs; | |
136 public: | |
137 RedirtyLoggedCardTableEntryClosure() : | |
138 _calls(0) | |
139 { | |
140 _g1h = G1CollectedHeap::heap(); | |
141 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
142 } | |
143 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
144 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
145 _calls++; | |
146 *card_ptr = 0; | |
147 } | |
148 return true; | |
149 } | |
150 int calls() { return _calls; } | |
151 }; | |
152 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
153 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
154 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
155 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
156 *card_ptr = CardTableModRefBS::dirty_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
157 return true; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
158 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
159 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
160 |
342 | 161 YoungList::YoungList(G1CollectedHeap* g1h) |
162 : _g1h(g1h), _head(NULL), | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
163 _length(0), |
342 | 164 _last_sampled_rs_lengths(0), |
545 | 165 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
342 | 166 { |
167 guarantee( check_list_empty(false), "just making sure..." ); | |
168 } | |
169 | |
170 void YoungList::push_region(HeapRegion *hr) { | |
171 assert(!hr->is_young(), "should not already be young"); | |
172 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
173 | |
174 hr->set_next_young_region(_head); | |
175 _head = hr; | |
176 | |
177 hr->set_young(); | |
178 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
179 ++_length; | |
180 } | |
181 | |
182 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 183 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 184 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
185 | |
186 hr->set_next_young_region(_survivor_head); | |
187 if (_survivor_head == NULL) { | |
545 | 188 _survivor_tail = hr; |
342 | 189 } |
190 _survivor_head = hr; | |
191 | |
192 ++_survivor_length; | |
193 } | |
194 | |
195 void YoungList::empty_list(HeapRegion* list) { | |
196 while (list != NULL) { | |
197 HeapRegion* next = list->get_next_young_region(); | |
198 list->set_next_young_region(NULL); | |
199 list->uninstall_surv_rate_group(); | |
200 list->set_not_young(); | |
201 list = next; | |
202 } | |
203 } | |
204 | |
205 void YoungList::empty_list() { | |
206 assert(check_list_well_formed(), "young list should be well formed"); | |
207 | |
208 empty_list(_head); | |
209 _head = NULL; | |
210 _length = 0; | |
211 | |
212 empty_list(_survivor_head); | |
213 _survivor_head = NULL; | |
545 | 214 _survivor_tail = NULL; |
342 | 215 _survivor_length = 0; |
216 | |
217 _last_sampled_rs_lengths = 0; | |
218 | |
219 assert(check_list_empty(false), "just making sure..."); | |
220 } | |
221 | |
222 bool YoungList::check_list_well_formed() { | |
223 bool ret = true; | |
224 | |
225 size_t length = 0; | |
226 HeapRegion* curr = _head; | |
227 HeapRegion* last = NULL; | |
228 while (curr != NULL) { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
229 if (!curr->is_young()) { |
342 | 230 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
231 "incorrectly tagged (y: %d, surv: %d)", |
342 | 232 curr->bottom(), curr->end(), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
233 curr->is_young(), curr->is_survivor()); |
342 | 234 ret = false; |
235 } | |
236 ++length; | |
237 last = curr; | |
238 curr = curr->get_next_young_region(); | |
239 } | |
240 ret = ret && (length == _length); | |
241 | |
242 if (!ret) { | |
243 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
244 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
245 length, _length); | |
246 } | |
247 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
248 return ret; |
342 | 249 } |
250 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
251 bool YoungList::check_list_empty(bool check_sample) { |
342 | 252 bool ret = true; |
253 | |
254 if (_length != 0) { | |
255 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
256 _length); | |
257 ret = false; | |
258 } | |
259 if (check_sample && _last_sampled_rs_lengths != 0) { | |
260 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
261 ret = false; | |
262 } | |
263 if (_head != NULL) { | |
264 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
265 ret = false; | |
266 } | |
267 if (!ret) { | |
268 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
269 } | |
270 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
271 return ret; |
342 | 272 } |
273 | |
274 void | |
275 YoungList::rs_length_sampling_init() { | |
276 _sampled_rs_lengths = 0; | |
277 _curr = _head; | |
278 } | |
279 | |
280 bool | |
281 YoungList::rs_length_sampling_more() { | |
282 return _curr != NULL; | |
283 } | |
284 | |
285 void | |
286 YoungList::rs_length_sampling_next() { | |
287 assert( _curr != NULL, "invariant" ); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
288 size_t rs_length = _curr->rem_set()->occupied(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
289 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
290 _sampled_rs_lengths += rs_length; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
291 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
292 // The current region may not yet have been added to the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
293 // incremental collection set (it gets added when it is |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
294 // retired as the current allocation region). |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
295 if (_curr->in_collection_set()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
296 // Update the collection set policy information for this region |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
297 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
298 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
299 |
342 | 300 _curr = _curr->get_next_young_region(); |
301 if (_curr == NULL) { | |
302 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
303 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
304 } | |
305 } | |
306 | |
307 void | |
308 YoungList::reset_auxilary_lists() { | |
309 guarantee( is_empty(), "young list should be empty" ); | |
310 assert(check_list_well_formed(), "young list should be well formed"); | |
311 | |
312 // Add survivor regions to SurvRateGroup. | |
313 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
545 | 314 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
315 |
342 | 316 for (HeapRegion* curr = _survivor_head; |
317 curr != NULL; | |
318 curr = curr->get_next_young_region()) { | |
319 _g1h->g1_policy()->set_region_survivors(curr); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
320 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
321 // The region is a non-empty survivor so let's add it to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
322 // the incremental collection set for the next evacuation |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
323 // pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
324 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); |
342 | 325 } |
326 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
327 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
328 _head = _survivor_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
329 _length = _survivor_length; |
342 | 330 if (_survivor_head != NULL) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
331 assert(_survivor_tail != NULL, "cause it shouldn't be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
332 assert(_survivor_length > 0, "invariant"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
333 _survivor_tail->set_next_young_region(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
334 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
335 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
336 // Don't clear the survivor list handles until the start of |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
337 // the next evacuation pause - we need it in order to re-tag |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
338 // the survivor regions from this evacuation pause as 'young' |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
339 // at the start of the next. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
340 |
545 | 341 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
342 | 342 |
343 assert(check_list_well_formed(), "young list should be well formed"); | |
344 } | |
345 | |
346 void YoungList::print() { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
347 HeapRegion* lists[] = {_head, _survivor_head}; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
348 const char* names[] = {"YOUNG", "SURVIVOR"}; |
342 | 349 |
350 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
351 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
352 HeapRegion *curr = lists[list]; | |
353 if (curr == NULL) | |
354 gclog_or_tty->print_cr(" empty"); | |
355 while (curr != NULL) { | |
356 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
357 "age: %4d, y: %d, surv: %d", |
342 | 358 curr->bottom(), curr->end(), |
359 curr->top(), | |
360 curr->prev_top_at_mark_start(), | |
361 curr->next_top_at_mark_start(), | |
362 curr->top_at_conc_mark_count(), | |
363 curr->age_in_surv_rate_group_cond(), | |
364 curr->is_young(), | |
365 curr->is_survivor()); | |
366 curr = curr->get_next_young_region(); | |
367 } | |
368 } | |
369 | |
370 gclog_or_tty->print_cr(""); | |
371 } | |
372 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
373 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
374 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
375 // Claim the right to put the region on the dirty cards region list |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
376 // by installing a self pointer. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
377 HeapRegion* next = hr->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
378 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
379 HeapRegion* res = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
380 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
381 NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
382 if (res == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
383 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
384 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
385 // Put the region to the dirty cards region list. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
386 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
387 next = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
388 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
389 if (next == head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
390 assert(hr->get_next_dirty_cards_region() == hr, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
391 "hr->get_next_dirty_cards_region() != hr"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
392 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
393 // The last region in the list points to itself. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
394 hr->set_next_dirty_cards_region(hr); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
395 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
396 hr->set_next_dirty_cards_region(next); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
397 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
398 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
399 } while (next != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
400 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
401 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
402 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
403 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
404 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
405 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
406 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
407 HeapRegion* hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
408 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
409 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
410 if (head == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
411 return NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
412 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
413 HeapRegion* new_head = head->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
414 if (head == new_head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
415 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
416 new_head = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
417 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
418 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
419 head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
420 } while (hr != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
421 assert(hr != NULL, "invariant"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
422 hr->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
423 return hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
424 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
425 |
342 | 426 void G1CollectedHeap::stop_conc_gc_threads() { |
794 | 427 _cg1r->stop(); |
342 | 428 _cmThread->stop(); |
429 } | |
430 | |
431 void G1CollectedHeap::check_ct_logs_at_safepoint() { | |
432 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
433 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
434 | |
435 // Count the dirty cards at the start. | |
436 CountNonCleanMemRegionClosure count1(this); | |
437 ct_bs->mod_card_iterate(&count1); | |
438 int orig_count = count1.n(); | |
439 | |
440 // First clear the logged cards. | |
441 ClearLoggedCardTableEntryClosure clear; | |
442 dcqs.set_closure(&clear); | |
443 dcqs.apply_closure_to_all_completed_buffers(); | |
444 dcqs.iterate_closure_all_threads(false); | |
445 clear.print_histo(); | |
446 | |
447 // Now ensure that there's no dirty cards. | |
448 CountNonCleanMemRegionClosure count2(this); | |
449 ct_bs->mod_card_iterate(&count2); | |
450 if (count2.n() != 0) { | |
451 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
452 count2.n(), orig_count); | |
453 } | |
454 guarantee(count2.n() == 0, "Card table should be clean."); | |
455 | |
456 RedirtyLoggedCardTableEntryClosure redirty; | |
457 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
458 dcqs.apply_closure_to_all_completed_buffers(); | |
459 dcqs.iterate_closure_all_threads(false); | |
460 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
461 clear.calls(), orig_count); | |
462 guarantee(redirty.calls() == clear.calls(), | |
463 "Or else mechanism is broken."); | |
464 | |
465 CountNonCleanMemRegionClosure count3(this); | |
466 ct_bs->mod_card_iterate(&count3); | |
467 if (count3.n() != orig_count) { | |
468 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
469 orig_count, count3.n()); | |
470 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
471 } | |
472 | |
473 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
474 } | |
475 | |
476 // Private class members. | |
477 | |
478 G1CollectedHeap* G1CollectedHeap::_g1h; | |
479 | |
480 // Private methods. | |
481 | |
2152 | 482 HeapRegion* |
2361 | 483 G1CollectedHeap::new_region_try_secondary_free_list() { |
2152 | 484 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
485 while (!_secondary_free_list.is_empty() || free_regions_coming()) { | |
486 if (!_secondary_free_list.is_empty()) { | |
487 if (G1ConcRegionFreeingVerbose) { | |
488 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
489 "secondary_free_list has "SIZE_FORMAT" entries", | |
490 _secondary_free_list.length()); | |
491 } | |
492 // It looks as if there are free regions available on the | |
493 // secondary_free_list. Let's move them to the free_list and try | |
494 // again to allocate from it. | |
495 append_secondary_free_list(); | |
496 | |
497 assert(!_free_list.is_empty(), "if the secondary_free_list was not " | |
498 "empty we should have moved at least one entry to the free_list"); | |
499 HeapRegion* res = _free_list.remove_head(); | |
500 if (G1ConcRegionFreeingVerbose) { | |
501 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
502 "allocated "HR_FORMAT" from secondary_free_list", | |
503 HR_FORMAT_PARAMS(res)); | |
504 } | |
505 return res; | |
506 } | |
507 | |
508 // Wait here until we get notifed either when (a) there are no | |
509 // more free regions coming or (b) some regions have been moved on | |
510 // the secondary_free_list. | |
511 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); | |
512 } | |
513 | |
514 if (G1ConcRegionFreeingVerbose) { | |
515 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
516 "could not allocate from secondary_free_list"); | |
517 } | |
518 return NULL; | |
519 } | |
520 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
521 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) { |
2152 | 522 assert(!isHumongous(word_size) || |
523 word_size <= (size_t) HeapRegion::GrainWords, | |
524 "the only time we use this to allocate a humongous region is " | |
525 "when we are allocating a single humongous region"); | |
526 | |
527 HeapRegion* res; | |
528 if (G1StressConcRegionFreeing) { | |
529 if (!_secondary_free_list.is_empty()) { | |
530 if (G1ConcRegionFreeingVerbose) { | |
531 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
532 "forced to look at the secondary_free_list"); | |
533 } | |
2361 | 534 res = new_region_try_secondary_free_list(); |
2152 | 535 if (res != NULL) { |
536 return res; | |
537 } | |
538 } | |
539 } | |
540 res = _free_list.remove_head_or_null(); | |
541 if (res == NULL) { | |
542 if (G1ConcRegionFreeingVerbose) { | |
543 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
544 "res == NULL, trying the secondary_free_list"); | |
545 } | |
2361 | 546 res = new_region_try_secondary_free_list(); |
2152 | 547 } |
342 | 548 if (res == NULL && do_expand) { |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
549 if (expand(word_size * HeapWordSize)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
550 // The expansion succeeded and so we should have at least one |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
551 // region on the free list. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
552 res = _free_list.remove_head(); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
553 } |
342 | 554 } |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
555 if (res != NULL) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
556 if (G1PrintHeapRegions) { |
2152 | 557 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], " |
558 "top "PTR_FORMAT, res->hrs_index(), | |
559 res->bottom(), res->end(), res->top()); | |
342 | 560 } |
561 } | |
562 return res; | |
563 } | |
564 | |
2152 | 565 HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose, |
566 size_t word_size) { | |
342 | 567 HeapRegion* alloc_region = NULL; |
568 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
569 alloc_region = new_region(word_size, true /* do_expand */); |
342 | 570 if (purpose == GCAllocForSurvived && alloc_region != NULL) { |
545 | 571 alloc_region->set_survivor(); |
342 | 572 } |
573 ++_gc_alloc_region_counts[purpose]; | |
574 } else { | |
575 g1_policy()->note_alloc_region_limit_reached(purpose); | |
576 } | |
577 return alloc_region; | |
578 } | |
579 | |
2152 | 580 int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, |
581 size_t word_size) { | |
2361 | 582 assert(isHumongous(word_size), "word_size should be humongous"); |
583 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); | |
584 | |
2152 | 585 int first = -1; |
586 if (num_regions == 1) { | |
587 // Only one region to allocate, no need to go through the slower | |
588 // path. The caller will attempt the expasion if this fails, so | |
589 // let's not try to expand here too. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
590 HeapRegion* hr = new_region(word_size, false /* do_expand */); |
2152 | 591 if (hr != NULL) { |
592 first = hr->hrs_index(); | |
593 } else { | |
594 first = -1; | |
595 } | |
596 } else { | |
597 // We can't allocate humongous regions while cleanupComplete() is | |
598 // running, since some of the regions we find to be empty might not | |
599 // yet be added to the free list and it is not straightforward to | |
600 // know which list they are on so that we can remove them. Note | |
601 // that we only need to do this if we need to allocate more than | |
602 // one region to satisfy the current humongous allocation | |
603 // request. If we are only allocating one region we use the common | |
604 // region allocation code (see above). | |
605 wait_while_free_regions_coming(); | |
2361 | 606 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 607 |
608 if (free_regions() >= num_regions) { | |
609 first = _hrs->find_contiguous(num_regions); | |
610 if (first != -1) { | |
611 for (int i = first; i < first + (int) num_regions; ++i) { | |
612 HeapRegion* hr = _hrs->at(i); | |
613 assert(hr->is_empty(), "sanity"); | |
2361 | 614 assert(is_on_master_free_list(hr), "sanity"); |
2152 | 615 hr->set_pending_removal(true); |
616 } | |
617 _free_list.remove_all_pending(num_regions); | |
618 } | |
619 } | |
620 } | |
621 return first; | |
622 } | |
623 | |
2361 | 624 HeapWord* |
625 G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first, | |
626 size_t num_regions, | |
627 size_t word_size) { | |
628 assert(first != -1, "pre-condition"); | |
629 assert(isHumongous(word_size), "word_size should be humongous"); | |
630 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); | |
631 | |
632 // Index of last region in the series + 1. | |
633 int last = first + (int) num_regions; | |
634 | |
635 // We need to initialize the region(s) we just discovered. This is | |
636 // a bit tricky given that it can happen concurrently with | |
637 // refinement threads refining cards on these regions and | |
638 // potentially wanting to refine the BOT as they are scanning | |
639 // those cards (this can happen shortly after a cleanup; see CR | |
640 // 6991377). So we have to set up the region(s) carefully and in | |
641 // a specific order. | |
642 | |
643 // The word size sum of all the regions we will allocate. | |
644 size_t word_size_sum = num_regions * HeapRegion::GrainWords; | |
645 assert(word_size <= word_size_sum, "sanity"); | |
646 | |
647 // This will be the "starts humongous" region. | |
648 HeapRegion* first_hr = _hrs->at(first); | |
649 // The header of the new object will be placed at the bottom of | |
650 // the first region. | |
651 HeapWord* new_obj = first_hr->bottom(); | |
652 // This will be the new end of the first region in the series that | |
653 // should also match the end of the last region in the seriers. | |
654 HeapWord* new_end = new_obj + word_size_sum; | |
655 // This will be the new top of the first region that will reflect | |
656 // this allocation. | |
657 HeapWord* new_top = new_obj + word_size; | |
658 | |
659 // First, we need to zero the header of the space that we will be | |
660 // allocating. When we update top further down, some refinement | |
661 // threads might try to scan the region. By zeroing the header we | |
662 // ensure that any thread that will try to scan the region will | |
663 // come across the zero klass word and bail out. | |
664 // | |
665 // NOTE: It would not have been correct to have used | |
666 // CollectedHeap::fill_with_object() and make the space look like | |
667 // an int array. The thread that is doing the allocation will | |
668 // later update the object header to a potentially different array | |
669 // type and, for a very short period of time, the klass and length | |
670 // fields will be inconsistent. This could cause a refinement | |
671 // thread to calculate the object size incorrectly. | |
672 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); | |
673 | |
674 // We will set up the first region as "starts humongous". This | |
675 // will also update the BOT covering all the regions to reflect | |
676 // that there is a single object that starts at the bottom of the | |
677 // first region. | |
678 first_hr->set_startsHumongous(new_top, new_end); | |
679 | |
680 // Then, if there are any, we will set up the "continues | |
681 // humongous" regions. | |
682 HeapRegion* hr = NULL; | |
683 for (int i = first + 1; i < last; ++i) { | |
684 hr = _hrs->at(i); | |
685 hr->set_continuesHumongous(first_hr); | |
686 } | |
687 // If we have "continues humongous" regions (hr != NULL), then the | |
688 // end of the last one should match new_end. | |
689 assert(hr == NULL || hr->end() == new_end, "sanity"); | |
690 | |
691 // Up to this point no concurrent thread would have been able to | |
692 // do any scanning on any region in this series. All the top | |
693 // fields still point to bottom, so the intersection between | |
694 // [bottom,top] and [card_start,card_end] will be empty. Before we | |
695 // update the top fields, we'll do a storestore to make sure that | |
696 // no thread sees the update to top before the zeroing of the | |
697 // object header and the BOT initialization. | |
698 OrderAccess::storestore(); | |
699 | |
700 // Now that the BOT and the object header have been initialized, | |
701 // we can update top of the "starts humongous" region. | |
702 assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), | |
703 "new_top should be in this region"); | |
704 first_hr->set_top(new_top); | |
705 | |
706 // Now, we will update the top fields of the "continues humongous" | |
707 // regions. The reason we need to do this is that, otherwise, | |
708 // these regions would look empty and this will confuse parts of | |
709 // G1. For example, the code that looks for a consecutive number | |
710 // of empty regions will consider them empty and try to | |
711 // re-allocate them. We can extend is_empty() to also include | |
712 // !continuesHumongous(), but it is easier to just update the top | |
713 // fields here. The way we set top for all regions (i.e., top == | |
714 // end for all regions but the last one, top == new_top for the | |
715 // last one) is actually used when we will free up the humongous | |
716 // region in free_humongous_region(). | |
717 hr = NULL; | |
718 for (int i = first + 1; i < last; ++i) { | |
719 hr = _hrs->at(i); | |
720 if ((i + 1) == last) { | |
721 // last continues humongous region | |
722 assert(hr->bottom() < new_top && new_top <= hr->end(), | |
723 "new_top should fall on this region"); | |
724 hr->set_top(new_top); | |
725 } else { | |
726 // not last one | |
727 assert(new_top > hr->end(), "new_top should be above this region"); | |
728 hr->set_top(hr->end()); | |
729 } | |
730 } | |
731 // If we have continues humongous regions (hr != NULL), then the | |
732 // end of the last one should match new_end and its top should | |
733 // match new_top. | |
734 assert(hr == NULL || | |
735 (hr->end() == new_end && hr->top() == new_top), "sanity"); | |
736 | |
737 assert(first_hr->used() == word_size * HeapWordSize, "invariant"); | |
738 _summary_bytes_used += first_hr->used(); | |
739 _humongous_set.add(first_hr); | |
740 | |
741 return new_obj; | |
742 } | |
743 | |
342 | 744 // If could fit into free regions w/o expansion, try. |
745 // Otherwise, if can expand, do so. | |
746 // Otherwise, if using ex regions might help, try with ex given back. | |
1973 | 747 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { |
2152 | 748 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
749 | |
750 verify_region_sets_optional(); | |
342 | 751 |
752 size_t num_regions = | |
1973 | 753 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; |
342 | 754 size_t x_size = expansion_regions(); |
2152 | 755 size_t fs = _hrs->free_suffix(); |
756 int first = humongous_obj_allocate_find_first(num_regions, word_size); | |
757 if (first == -1) { | |
758 // The only thing we can do now is attempt expansion. | |
342 | 759 if (fs + x_size >= num_regions) { |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
760 // If the number of regions we're trying to allocate for this |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
761 // object is at most the number of regions in the free suffix, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
762 // then the call to humongous_obj_allocate_find_first() above |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
763 // should have succeeded and we wouldn't be here. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
764 // |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
765 // We should only be trying to expand when the free suffix is |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
766 // not sufficient for the object _and_ we have some expansion |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
767 // room available. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
768 assert(num_regions > fs, "earlier allocation should have succeeded"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
769 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
770 if (expand((num_regions - fs) * HeapRegion::GrainBytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
771 first = humongous_obj_allocate_find_first(num_regions, word_size); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
772 // If the expansion was successful then the allocation |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
773 // should have been successful. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
774 assert(first != -1, "this should have worked"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
775 } |
2152 | 776 } |
777 } | |
778 | |
2361 | 779 HeapWord* result = NULL; |
2152 | 780 if (first != -1) { |
2361 | 781 result = |
782 humongous_obj_allocate_initialize_regions(first, num_regions, word_size); | |
783 assert(result != NULL, "it should always return a valid result"); | |
2152 | 784 } |
785 | |
786 verify_region_sets_optional(); | |
2361 | 787 |
788 return result; | |
342 | 789 } |
790 | |
1973 | 791 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { |
792 assert_heap_not_locked_and_not_at_safepoint(); | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
793 assert(!isHumongous(word_size), "we do not allow humongous TLABs"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
794 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
795 unsigned int dummy_gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
796 return attempt_allocation(word_size, &dummy_gc_count_before); |
342 | 797 } |
798 | |
799 HeapWord* | |
800 G1CollectedHeap::mem_allocate(size_t word_size, | |
801 bool is_noref, | |
802 bool is_tlab, | |
1973 | 803 bool* gc_overhead_limit_was_exceeded) { |
804 assert_heap_not_locked_and_not_at_safepoint(); | |
805 assert(!is_tlab, "mem_allocate() this should not be called directly " | |
806 "to allocate TLABs"); | |
342 | 807 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
808 // Loop until the allocation is satisified, or unsatisfied after GC. |
1973 | 809 for (int try_count = 1; /* we'll return */; try_count += 1) { |
810 unsigned int gc_count_before; | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
811 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
812 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
813 if (!isHumongous(word_size)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
814 result = attempt_allocation(word_size, &gc_count_before); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
815 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
816 result = attempt_allocation_humongous(word_size, &gc_count_before); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
817 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
818 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
819 return result; |
342 | 820 } |
821 | |
822 // Create the garbage collection operation... | |
1973 | 823 VM_G1CollectForAllocation op(gc_count_before, word_size); |
342 | 824 // ...and get the VM thread to execute it. |
825 VMThread::execute(&op); | |
1973 | 826 |
827 if (op.prologue_succeeded() && op.pause_succeeded()) { | |
828 // If the operation was successful we'll return the result even | |
829 // if it is NULL. If the allocation attempt failed immediately | |
830 // after a Full GC, it's unlikely we'll be able to allocate now. | |
831 HeapWord* result = op.result(); | |
832 if (result != NULL && !isHumongous(word_size)) { | |
833 // Allocations that take place on VM operations do not do any | |
834 // card dirtying and we have to do it here. We only have to do | |
835 // this for non-humongous allocations, though. | |
836 dirty_young_block(result, word_size); | |
837 } | |
342 | 838 return result; |
1973 | 839 } else { |
840 assert(op.result() == NULL, | |
841 "the result should be NULL if the VM op did not succeed"); | |
342 | 842 } |
843 | |
844 // Give a warning if we seem to be looping forever. | |
845 if ((QueuedAllocationWarningCount > 0) && | |
846 (try_count % QueuedAllocationWarningCount == 0)) { | |
1973 | 847 warning("G1CollectedHeap::mem_allocate retries %d times", try_count); |
342 | 848 } |
849 } | |
1973 | 850 |
851 ShouldNotReachHere(); | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
852 return NULL; |
342 | 853 } |
854 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
855 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
856 unsigned int *gc_count_before_ret) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
857 // Make sure you read the note in attempt_allocation_humongous(). |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
858 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
859 assert_heap_not_locked_and_not_at_safepoint(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
860 assert(!isHumongous(word_size), "attempt_allocation_slow() should not " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
861 "be called for humongous allocation requests"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
862 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
863 // We should only get here after the first-level allocation attempt |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
864 // (attempt_allocation()) failed to allocate. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
865 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
866 // We will loop until a) we manage to successfully perform the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
867 // allocation or b) we successfully schedule a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
868 // fails to perform the allocation. b) is the only case when we'll |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
869 // return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
870 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
871 for (int try_count = 1; /* we'll return */; try_count += 1) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
872 bool should_try_gc; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
873 unsigned int gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
874 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
875 { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
876 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
877 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
878 result = _mutator_alloc_region.attempt_allocation_locked(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
879 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
880 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
881 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
882 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
883 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
884 // If we reach here, attempt_allocation_locked() above failed to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
885 // allocate a new region. So the mutator alloc region should be NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
886 assert(_mutator_alloc_region.get() == NULL, "only way to get here"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
887 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
888 if (GC_locker::is_active_and_needs_gc()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
889 if (g1_policy()->can_expand_young_list()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
890 result = _mutator_alloc_region.attempt_allocation_force(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
891 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
892 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
893 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
894 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
895 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
896 should_try_gc = false; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
897 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
898 // Read the GC count while still holding the Heap_lock. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
899 gc_count_before = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
900 should_try_gc = true; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
901 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
902 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
903 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
904 if (should_try_gc) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
905 bool succeeded; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
906 result = do_collection_pause(word_size, gc_count_before, &succeeded); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
907 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
908 assert(succeeded, "only way to get back a non-NULL result"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
909 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
910 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
911 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
912 if (succeeded) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
913 // If we get here we successfully scheduled a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
914 // failed to allocate. No point in trying to allocate |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
915 // further. We'll just return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
916 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
917 *gc_count_before_ret = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
918 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
919 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
920 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
921 GC_locker::stall_until_clear(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
922 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
923 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
924 // We can reach here if we were unsuccessul in scheduling a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
925 // collection (because another thread beat us to it) or if we were |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
926 // stalled due to the GC locker. In either can we should retry the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
927 // allocation attempt in case another thread successfully |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
928 // performed a collection and reclaimed enough space. We do the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
929 // first attempt (without holding the Heap_lock) here and the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
930 // follow-on attempt will be at the start of the next loop |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
931 // iteration (after taking the Heap_lock). |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
932 result = _mutator_alloc_region.attempt_allocation(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
933 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
934 if (result != NULL ){ |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
935 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
936 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
937 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
938 // Give a warning if we seem to be looping forever. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
939 if ((QueuedAllocationWarningCount > 0) && |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
940 (try_count % QueuedAllocationWarningCount == 0)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
941 warning("G1CollectedHeap::attempt_allocation_slow() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
942 "retries %d times", try_count); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
943 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
944 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
945 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
946 ShouldNotReachHere(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
947 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
948 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
949 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
950 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
951 unsigned int * gc_count_before_ret) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
952 // The structure of this method has a lot of similarities to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
953 // attempt_allocation_slow(). The reason these two were not merged |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
954 // into a single one is that such a method would require several "if |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
955 // allocation is not humongous do this, otherwise do that" |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
956 // conditional paths which would obscure its flow. In fact, an early |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
957 // version of this code did use a unified method which was harder to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
958 // follow and, as a result, it had subtle bugs that were hard to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
959 // track down. So keeping these two methods separate allows each to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
960 // be more readable. It will be good to keep these two in sync as |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
961 // much as possible. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
962 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
963 assert_heap_not_locked_and_not_at_safepoint(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
964 assert(isHumongous(word_size), "attempt_allocation_humongous() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
965 "should only be called for humongous allocations"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
966 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
967 // We will loop until a) we manage to successfully perform the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
968 // allocation or b) we successfully schedule a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
969 // fails to perform the allocation. b) is the only case when we'll |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
970 // return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
971 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
972 for (int try_count = 1; /* we'll return */; try_count += 1) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
973 bool should_try_gc; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
974 unsigned int gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
975 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
976 { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
977 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
978 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
979 // Given that humongous objects are not allocated in young |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
980 // regions, we'll first try to do the allocation without doing a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
981 // collection hoping that there's enough space in the heap. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
982 result = humongous_obj_allocate(word_size); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
983 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
984 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
985 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
986 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
987 if (GC_locker::is_active_and_needs_gc()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
988 should_try_gc = false; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
989 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
990 // Read the GC count while still holding the Heap_lock. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
991 gc_count_before = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
992 should_try_gc = true; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
993 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
994 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
995 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
996 if (should_try_gc) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
997 // If we failed to allocate the humongous object, we should try to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
998 // do a collection pause (if we're allowed) in case it reclaims |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
999 // enough space for the allocation to succeed after the pause. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1000 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1001 bool succeeded; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1002 result = do_collection_pause(word_size, gc_count_before, &succeeded); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1003 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1004 assert(succeeded, "only way to get back a non-NULL result"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1005 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1006 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1007 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1008 if (succeeded) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1009 // If we get here we successfully scheduled a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1010 // failed to allocate. No point in trying to allocate |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1011 // further. We'll just return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1012 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1013 *gc_count_before_ret = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1014 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1015 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1016 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1017 GC_locker::stall_until_clear(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1018 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1019 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1020 // We can reach here if we were unsuccessul in scheduling a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1021 // collection (because another thread beat us to it) or if we were |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1022 // stalled due to the GC locker. In either can we should retry the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1023 // allocation attempt in case another thread successfully |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1024 // performed a collection and reclaimed enough space. Give a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1025 // warning if we seem to be looping forever. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1026 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1027 if ((QueuedAllocationWarningCount > 0) && |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1028 (try_count % QueuedAllocationWarningCount == 0)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1029 warning("G1CollectedHeap::attempt_allocation_humongous() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1030 "retries %d times", try_count); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1031 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1032 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1033 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1034 ShouldNotReachHere(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1035 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1036 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1037 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1038 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1039 bool expect_null_mutator_alloc_region) { |
2152 | 1040 assert_at_safepoint(true /* should_be_vm_thread */); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1041 assert(_mutator_alloc_region.get() == NULL || |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1042 !expect_null_mutator_alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1043 "the current alloc region was unexpectedly found to be non-NULL"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1044 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1045 if (!isHumongous(word_size)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1046 return _mutator_alloc_region.attempt_allocation_locked(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1047 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1048 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1049 return humongous_obj_allocate(word_size); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1050 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1051 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1052 ShouldNotReachHere(); |
342 | 1053 } |
1054 | |
636 | 1055 void G1CollectedHeap::abandon_gc_alloc_regions() { |
1056 // first, make sure that the GC alloc region list is empty (it should!) | |
1057 assert(_gc_alloc_region_list == NULL, "invariant"); | |
1058 release_gc_alloc_regions(true /* totally */); | |
1059 } | |
1060 | |
342 | 1061 class PostMCRemSetClearClosure: public HeapRegionClosure { |
1062 ModRefBarrierSet* _mr_bs; | |
1063 public: | |
1064 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1065 bool doHeapRegion(HeapRegion* r) { | |
1066 r->reset_gc_time_stamp(); | |
1067 if (r->continuesHumongous()) | |
1068 return false; | |
1069 HeapRegionRemSet* hrrs = r->rem_set(); | |
1070 if (hrrs != NULL) hrrs->clear(); | |
1071 // You might think here that we could clear just the cards | |
1072 // corresponding to the used region. But no: if we leave a dirty card | |
1073 // in a region we might allocate into, then it would prevent that card | |
1074 // from being enqueued, and cause it to be missed. | |
1075 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
1076 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
1077 return false; | |
1078 } | |
1079 }; | |
1080 | |
1081 | |
1082 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
1083 ModRefBarrierSet* _mr_bs; | |
1084 public: | |
1085 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1086 bool doHeapRegion(HeapRegion* r) { | |
1087 if (r->continuesHumongous()) return false; | |
1088 if (r->used_region().word_size() != 0) { | |
1089 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
1090 } | |
1091 return false; | |
1092 } | |
1093 }; | |
1094 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1095 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1096 G1CollectedHeap* _g1h; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1097 UpdateRSOopClosure _cl; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1098 int _worker_i; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1099 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1100 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
1861 | 1101 _cl(g1->g1_rem_set(), worker_i), |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1102 _worker_i(worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1103 _g1h(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1104 { } |
1960
878b57474103
6978187: G1: assert(ParallelGCThreads> 1 || n_yielded() == _hrrs->occupied()) strikes again
johnc
parents:
1883
diff
changeset
|
1105 |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1106 bool doHeapRegion(HeapRegion* r) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1107 if (!r->continuesHumongous()) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1108 _cl.set_from(r); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1109 r->oop_iterate(&_cl); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1110 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1111 return false; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1112 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1113 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1114 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1115 class ParRebuildRSTask: public AbstractGangTask { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1116 G1CollectedHeap* _g1; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1117 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1118 ParRebuildRSTask(G1CollectedHeap* g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1119 : AbstractGangTask("ParRebuildRSTask"), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1120 _g1(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1121 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1122 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1123 void work(int i) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1124 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1125 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1126 HeapRegion::RebuildRSClaimValue); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1127 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1128 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1129 |
1973 | 1130 bool G1CollectedHeap::do_collection(bool explicit_gc, |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1131 bool clear_all_soft_refs, |
342 | 1132 size_t word_size) { |
2152 | 1133 assert_at_safepoint(true /* should_be_vm_thread */); |
1134 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1135 if (GC_locker::check_active_before_gc()) { |
1973 | 1136 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1137 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1138 |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2039
diff
changeset
|
1139 SvcGCMarker sgcm(SvcGCMarker::FULL); |
342 | 1140 ResourceMark rm; |
1141 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1142 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1143 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1144 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1145 |
2152 | 1146 verify_region_sets_optional(); |
342 | 1147 |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1148 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1149 collector_policy()->should_clear_all_soft_refs(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1150 |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1151 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1152 |
342 | 1153 { |
1154 IsGCActiveMark x; | |
1155 | |
1156 // Timing | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1157 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1158 assert(!system_gc || explicit_gc, "invariant"); |
342 | 1159 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
1160 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1161 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1162 PrintGC, true, gclog_or_tty); |
342 | 1163 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1164 TraceMemoryManagerStats tms(true /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1165 |
342 | 1166 double start = os::elapsedTime(); |
1167 g1_policy()->record_full_collection_start(); | |
1168 | |
2152 | 1169 wait_while_free_regions_coming(); |
2361 | 1170 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 1171 |
342 | 1172 gc_prologue(true); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1173 increment_total_collections(true /* full gc */); |
342 | 1174 |
1175 size_t g1h_prev_used = used(); | |
1176 assert(used() == recalculate_used(), "Should be equal"); | |
1177 | |
1178 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
1179 HandleMark hm; // Discard invalid handles created during verification | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1180 gclog_or_tty->print(" VerifyBeforeGC:"); |
342 | 1181 prepare_for_verify(); |
1182 Universe::verify(true); | |
1183 } | |
1184 | |
1185 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
1186 | |
1187 // We want to discover references, but not process them yet. | |
1188 // This mode is disabled in | |
1189 // instanceRefKlass::process_discovered_references if the | |
1190 // generation does some collection work, or | |
1191 // instanceRefKlass::enqueue_discovered_references if the | |
1192 // generation returns without doing any work. | |
1193 ref_processor()->disable_discovery(); | |
1194 ref_processor()->abandon_partial_discovery(); | |
1195 ref_processor()->verify_no_references_recorded(); | |
1196 | |
1197 // Abandon current iterations of concurrent marking and concurrent | |
1198 // refinement, if any are in progress. | |
1199 concurrent_mark()->abort(); | |
1200 | |
1201 // Make sure we'll choose a new allocation region afterwards. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1202 release_mutator_alloc_region(); |
636 | 1203 abandon_gc_alloc_regions(); |
1861 | 1204 g1_rem_set()->cleanupHRRS(); |
342 | 1205 tear_down_region_lists(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1206 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1207 // We may have added regions to the current incremental collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1208 // set between the last GC or pause and now. We need to clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1209 // incremental collection set and then start rebuilding it afresh |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1210 // after this full GC. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1211 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1212 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1213 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1214 |
342 | 1215 if (g1_policy()->in_young_gc_mode()) { |
1216 empty_young_list(); | |
1217 g1_policy()->set_full_young_gcs(true); | |
1218 } | |
1219 | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1220 // See the comment in G1CollectedHeap::ref_processing_init() about |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1221 // how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1222 |
342 | 1223 // Temporarily make reference _discovery_ single threaded (non-MT). |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
1224 ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false); |
342 | 1225 |
1226 // Temporarily make refs discovery atomic | |
1227 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
1228 | |
1229 // Temporarily clear _is_alive_non_header | |
1230 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
1231 | |
1232 ref_processor()->enable_discovery(); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1233 ref_processor()->setup_policy(do_clear_all_soft_refs); |
342 | 1234 |
1235 // Do collection work | |
1236 { | |
1237 HandleMark hm; // Discard invalid handles created during gc | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1238 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); |
342 | 1239 } |
2152 | 1240 assert(free_regions() == 0, "we should not have added any free regions"); |
342 | 1241 rebuild_region_lists(); |
1242 | |
1243 _summary_bytes_used = recalculate_used(); | |
1244 | |
1245 ref_processor()->enqueue_discovered_references(); | |
1246 | |
1247 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
1248 | |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1249 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1250 |
342 | 1251 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
1252 HandleMark hm; // Discard invalid handles created during verification | |
1253 gclog_or_tty->print(" VerifyAfterGC:"); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
1254 prepare_for_verify(); |
342 | 1255 Universe::verify(false); |
1256 } | |
1257 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
1258 | |
1259 reset_gc_time_stamp(); | |
1260 // Since everything potentially moved, we will clear all remembered | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1261 // sets, and clear all cards. Later we will rebuild remebered |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1262 // sets. We will also reset the GC time stamps of the regions. |
342 | 1263 PostMCRemSetClearClosure rs_clear(mr_bs()); |
1264 heap_region_iterate(&rs_clear); | |
1265 | |
1266 // Resize the heap if necessary. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1267 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); |
342 | 1268 |
1269 if (_cg1r->use_cache()) { | |
1270 _cg1r->clear_and_record_card_counts(); | |
1271 _cg1r->clear_hot_cache(); | |
1272 } | |
1273 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1274 // Rebuild remembered sets of all regions. |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1275 |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1276 if (G1CollectedHeap::use_parallel_gc_threads()) { |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1277 ParRebuildRSTask rebuild_rs_task(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1278 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1279 HeapRegion::InitialClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1280 set_par_threads(workers()->total_workers()); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1281 workers()->run_task(&rebuild_rs_task); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1282 set_par_threads(0); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1283 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1284 HeapRegion::RebuildRSClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1285 reset_heap_region_claim_values(); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1286 } else { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1287 RebuildRSOutOfRegionClosure rebuild_rs(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1288 heap_region_iterate(&rebuild_rs); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1289 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1290 |
342 | 1291 if (PrintGC) { |
1292 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
1293 } | |
1294 | |
1295 if (true) { // FIXME | |
1296 // Ask the permanent generation to adjust size for full collections | |
1297 perm()->compute_new_size(); | |
1298 } | |
1299 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1300 // Start a new incremental collection set for the next pause |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1301 assert(g1_policy()->collection_set() == NULL, "must be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1302 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1303 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1304 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1305 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1306 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1307 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1308 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1309 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1310 |
342 | 1311 double end = os::elapsedTime(); |
1312 g1_policy()->record_full_collection_end(); | |
1313 | |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1314 #ifdef TRACESPINNING |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1315 ParallelTaskTerminator::print_termination_counts(); |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1316 #endif |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1317 |
342 | 1318 gc_epilogue(true); |
1319 | |
794 | 1320 // Discard all rset updates |
1321 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1322 assert(!G1DeferredRSUpdate |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1323 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
342 | 1324 } |
1325 | |
1326 if (g1_policy()->in_young_gc_mode()) { | |
1327 _young_list->reset_sampled_info(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1328 // At this point there should be no regions in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1329 // entire heap tagged as young. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1330 assert( check_young_list_empty(true /* check_heap */), |
342 | 1331 "young list should be empty at this point"); |
1332 } | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1333 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1334 // Update the number of full collections that have been completed. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
1335 increment_full_collections_completed(false /* concurrent */); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1336 |
2152 | 1337 verify_region_sets_optional(); |
1338 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1339 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1340 Universe::print_heap_after_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1341 } |
1973 | 1342 |
1343 return true; | |
342 | 1344 } |
1345 | |
1346 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1973 | 1347 // do_collection() will return whether it succeeded in performing |
1348 // the GC. Currently, there is no facility on the | |
1349 // do_full_collection() API to notify the caller than the collection | |
1350 // did not succeed (e.g., because it was locked out by the GC | |
1351 // locker). So, right now, we'll ignore the return value. | |
1352 bool dummy = do_collection(true, /* explicit_gc */ | |
1353 clear_all_soft_refs, | |
1354 0 /* word_size */); | |
342 | 1355 } |
1356 | |
1357 // This code is mostly copied from TenuredGeneration. | |
1358 void | |
1359 G1CollectedHeap:: | |
1360 resize_if_necessary_after_full_collection(size_t word_size) { | |
1361 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1362 | |
1363 // Include the current allocation, if any, and bytes that will be | |
1364 // pre-allocated to support collections, as "used". | |
1365 const size_t used_after_gc = used(); | |
1366 const size_t capacity_after_gc = capacity(); | |
1367 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1368 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1369 // This is enforced in arguments.cpp. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1370 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1371 "otherwise the code below doesn't make sense"); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1372 |
342 | 1373 // We don't have floating point command-line arguments |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1374 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; |
342 | 1375 const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1376 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; |
342 | 1377 const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
1378 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1379 const size_t min_heap_size = collector_policy()->min_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1380 const size_t max_heap_size = collector_policy()->max_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1381 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1382 // We have to be careful here as these two calculations can overflow |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1383 // 32-bit size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1384 double used_after_gc_d = (double) used_after_gc; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1385 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1386 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1387 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1388 // Let's make sure that they are both under the max heap size, which |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1389 // by default will make them fit into a size_t. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1390 double desired_capacity_upper_bound = (double) max_heap_size; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1391 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1392 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1393 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1394 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1395 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1396 // We can now safely turn them into size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1397 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1398 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1399 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1400 // This assert only makes sense here, before we adjust them |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1401 // with respect to the min and max heap size. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1402 assert(minimum_desired_capacity <= maximum_desired_capacity, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1403 err_msg("minimum_desired_capacity = "SIZE_FORMAT", " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1404 "maximum_desired_capacity = "SIZE_FORMAT, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1405 minimum_desired_capacity, maximum_desired_capacity)); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1406 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1407 // Should not be greater than the heap max size. No need to adjust |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1408 // it with respect to the heap min size as it's a lower bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1409 // we'll try to make the capacity larger than it, not smaller). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1410 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1411 // Should not be less than the heap min size. No need to adjust it |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1412 // with respect to the heap max size as it's an upper bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1413 // we'll try to make the capacity smaller than it, not greater). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1414 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); |
342 | 1415 |
1416 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1417 const double free_percentage = |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1418 (double) free_after_gc / (double) capacity_after_gc; |
342 | 1419 gclog_or_tty->print_cr("Computing new size after full GC "); |
1420 gclog_or_tty->print_cr(" " | |
1421 " minimum_free_percentage: %6.2f", | |
1422 minimum_free_percentage); | |
1423 gclog_or_tty->print_cr(" " | |
1424 " maximum_free_percentage: %6.2f", | |
1425 maximum_free_percentage); | |
1426 gclog_or_tty->print_cr(" " | |
1427 " capacity: %6.1fK" | |
1428 " minimum_desired_capacity: %6.1fK" | |
1429 " maximum_desired_capacity: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1430 (double) capacity_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1431 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1432 (double) maximum_desired_capacity / (double) K); |
342 | 1433 gclog_or_tty->print_cr(" " |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1434 " free_after_gc: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1435 " used_after_gc: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1436 (double) free_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1437 (double) used_after_gc / (double) K); |
342 | 1438 gclog_or_tty->print_cr(" " |
1439 " free_percentage: %6.2f", | |
1440 free_percentage); | |
1441 } | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1442 if (capacity_after_gc < minimum_desired_capacity) { |
342 | 1443 // Don't expand unless it's significant |
1444 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1445 if (expand(expand_bytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1446 if (PrintGC && Verbose) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1447 gclog_or_tty->print_cr(" " |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1448 " expanding:" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1449 " max_heap_size: %6.1fK" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1450 " minimum_desired_capacity: %6.1fK" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1451 " expand_bytes: %6.1fK", |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1452 (double) max_heap_size / (double) K, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1453 (double) minimum_desired_capacity / (double) K, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1454 (double) expand_bytes / (double) K); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1455 } |
342 | 1456 } |
1457 | |
1458 // No expansion, now see if we want to shrink | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1459 } else if (capacity_after_gc > maximum_desired_capacity) { |
342 | 1460 // Capacity too large, compute shrinking size |
1461 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1462 shrink(shrink_bytes); | |
1463 if (PrintGC && Verbose) { | |
1464 gclog_or_tty->print_cr(" " | |
1465 " shrinking:" | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1466 " min_heap_size: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1467 " maximum_desired_capacity: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1468 " shrink_bytes: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1469 (double) min_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1470 (double) maximum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1471 (double) shrink_bytes / (double) K); |
342 | 1472 } |
1473 } | |
1474 } | |
1475 | |
1476 | |
1477 HeapWord* | |
1973 | 1478 G1CollectedHeap::satisfy_failed_allocation(size_t word_size, |
1479 bool* succeeded) { | |
2152 | 1480 assert_at_safepoint(true /* should_be_vm_thread */); |
1973 | 1481 |
1482 *succeeded = true; | |
1483 // Let's attempt the allocation first. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1484 HeapWord* result = |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1485 attempt_allocation_at_safepoint(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1486 false /* expect_null_mutator_alloc_region */); |
1973 | 1487 if (result != NULL) { |
1488 assert(*succeeded, "sanity"); | |
1489 return result; | |
1490 } | |
342 | 1491 |
1492 // In a G1 heap, we're supposed to keep allocation from failing by | |
1493 // incremental pauses. Therefore, at least for now, we'll favor | |
1494 // expansion over collection. (This might change in the future if we can | |
1495 // do something smarter than full collection to satisfy a failed alloc.) | |
1496 result = expand_and_allocate(word_size); | |
1497 if (result != NULL) { | |
1973 | 1498 assert(*succeeded, "sanity"); |
342 | 1499 return result; |
1500 } | |
1501 | |
1973 | 1502 // Expansion didn't work, we'll try to do a Full GC. |
1503 bool gc_succeeded = do_collection(false, /* explicit_gc */ | |
1504 false, /* clear_all_soft_refs */ | |
1505 word_size); | |
1506 if (!gc_succeeded) { | |
1507 *succeeded = false; | |
1508 return NULL; | |
1509 } | |
1510 | |
1511 // Retry the allocation | |
1512 result = attempt_allocation_at_safepoint(word_size, | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1513 true /* expect_null_mutator_alloc_region */); |
342 | 1514 if (result != NULL) { |
1973 | 1515 assert(*succeeded, "sanity"); |
342 | 1516 return result; |
1517 } | |
1518 | |
1973 | 1519 // Then, try a Full GC that will collect all soft references. |
1520 gc_succeeded = do_collection(false, /* explicit_gc */ | |
1521 true, /* clear_all_soft_refs */ | |
1522 word_size); | |
1523 if (!gc_succeeded) { | |
1524 *succeeded = false; | |
1525 return NULL; | |
1526 } | |
1527 | |
1528 // Retry the allocation once more | |
1529 result = attempt_allocation_at_safepoint(word_size, | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1530 true /* expect_null_mutator_alloc_region */); |
342 | 1531 if (result != NULL) { |
1973 | 1532 assert(*succeeded, "sanity"); |
342 | 1533 return result; |
1534 } | |
1535 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1536 assert(!collector_policy()->should_clear_all_soft_refs(), |
1973 | 1537 "Flag should have been handled and cleared prior to this point"); |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1538 |
342 | 1539 // What else? We might try synchronous finalization later. If the total |
1540 // space available is large enough for the allocation, then a more | |
1541 // complete compaction phase than we've tried so far might be | |
1542 // appropriate. | |
1973 | 1543 assert(*succeeded, "sanity"); |
342 | 1544 return NULL; |
1545 } | |
1546 | |
1547 // Attempting to expand the heap sufficiently | |
1548 // to support an allocation of the given "word_size". If | |
1549 // successful, perform the allocation and return the address of the | |
1550 // allocated block, or else "NULL". | |
1551 | |
1552 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
2152 | 1553 assert_at_safepoint(true /* should_be_vm_thread */); |
1554 | |
1555 verify_region_sets_optional(); | |
1973 | 1556 |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1557 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1558 if (expand(expand_bytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1559 verify_region_sets_optional(); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1560 return attempt_allocation_at_safepoint(word_size, |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1561 false /* expect_null_mutator_alloc_region */); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1562 } |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1563 return NULL; |
342 | 1564 } |
1565 | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1566 bool G1CollectedHeap::expand(size_t expand_bytes) { |
342 | 1567 size_t old_mem_size = _g1_storage.committed_size(); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1568 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); |
342 | 1569 aligned_expand_bytes = align_size_up(aligned_expand_bytes, |
1570 HeapRegion::GrainBytes); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1571 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1572 if (Verbose && PrintGC) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1573 gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK", |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1574 old_mem_size/K, aligned_expand_bytes/K); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1575 } |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1576 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1577 HeapWord* old_end = (HeapWord*)_g1_storage.high(); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1578 bool successful = _g1_storage.expand_by(aligned_expand_bytes); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1579 if (successful) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1580 HeapWord* new_end = (HeapWord*)_g1_storage.high(); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1581 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1582 // Expand the committed region. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1583 _g1_committed.set_end(new_end); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1584 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1585 // Tell the cardtable about the expansion. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1586 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1587 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1588 // And the offset table as well. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1589 _bot_shared->resize(_g1_committed.word_size()); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1590 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1591 expand_bytes = aligned_expand_bytes; |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1592 HeapWord* base = old_end; |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1593 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1594 // Create the heap regions for [old_end, new_end) |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1595 while (expand_bytes > 0) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1596 HeapWord* high = base + HeapRegion::GrainWords; |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1597 |
342 | 1598 // Create a new HeapRegion. |
1599 MemRegion mr(base, high); | |
1600 bool is_zeroed = !_g1_max_committed.contains(base); | |
1601 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); | |
1602 | |
1603 // Add it to the HeapRegionSeq. | |
1604 _hrs->insert(hr); | |
2152 | 1605 _free_list.add_as_tail(hr); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1606 |
342 | 1607 // And we used up an expansion region to create it. |
1608 _expansion_regions--; | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1609 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1610 expand_bytes -= HeapRegion::GrainBytes; |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1611 base += HeapRegion::GrainWords; |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1612 } |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1613 assert(base == new_end, "sanity"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1614 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1615 // Now update max_committed if necessary. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1616 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end)); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1617 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1618 } else { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1619 // The expansion of the virtual storage space was unsuccessful. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1620 // Let's see if it was because we ran out of swap. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1621 if (G1ExitOnExpansionFailure && |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1622 _g1_storage.uncommitted_size() >= aligned_expand_bytes) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1623 // We had head room... |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1624 vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion"); |
342 | 1625 } |
1626 } | |
2152 | 1627 |
342 | 1628 if (Verbose && PrintGC) { |
1629 size_t new_mem_size = _g1_storage.committed_size(); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1630 gclog_or_tty->print_cr("...%s, expanded to %ldK", |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1631 (successful ? "Successful" : "Failed"), |
342 | 1632 new_mem_size/K); |
1633 } | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1634 return successful; |
342 | 1635 } |
1636 | |
1637 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) | |
1638 { | |
1639 size_t old_mem_size = _g1_storage.committed_size(); | |
1640 size_t aligned_shrink_bytes = | |
1641 ReservedSpace::page_align_size_down(shrink_bytes); | |
1642 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1643 HeapRegion::GrainBytes); | |
1644 size_t num_regions_deleted = 0; | |
1645 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); | |
1646 | |
1647 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1648 if (mr.byte_size() > 0) | |
1649 _g1_storage.shrink_by(mr.byte_size()); | |
1650 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1651 | |
1652 _g1_committed.set_end(mr.start()); | |
1653 _expansion_regions += num_regions_deleted; | |
1654 | |
1655 // Tell the cardtable about it. | |
1656 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1657 | |
1658 // And the offset table as well. | |
1659 _bot_shared->resize(_g1_committed.word_size()); | |
1660 | |
1661 HeapRegionRemSet::shrink_heap(n_regions()); | |
1662 | |
1663 if (Verbose && PrintGC) { | |
1664 size_t new_mem_size = _g1_storage.committed_size(); | |
1665 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1666 old_mem_size/K, aligned_shrink_bytes/K, | |
1667 new_mem_size/K); | |
1668 } | |
1669 } | |
1670 | |
1671 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
2152 | 1672 verify_region_sets_optional(); |
1673 | |
636 | 1674 release_gc_alloc_regions(true /* totally */); |
2152 | 1675 // Instead of tearing down / rebuilding the free lists here, we |
1676 // could instead use the remove_all_pending() method on free_list to | |
1677 // remove only the ones that we need to remove. | |
342 | 1678 tear_down_region_lists(); // We will rebuild them in a moment. |
1679 shrink_helper(shrink_bytes); | |
1680 rebuild_region_lists(); | |
2152 | 1681 |
1682 verify_region_sets_optional(); | |
342 | 1683 } |
1684 | |
1685 // Public methods. | |
1686 | |
1687 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1688 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1689 #endif // _MSC_VER | |
1690 | |
1691 | |
1692 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1693 SharedHeap(policy_), | |
1694 _g1_policy(policy_), | |
1111 | 1695 _dirty_card_queue_set(false), |
1705 | 1696 _into_cset_dirty_card_queue_set(false), |
2037
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
2030
diff
changeset
|
1697 _is_alive_closure(this), |
342 | 1698 _ref_processor(NULL), |
1699 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1700 _bot_shared(NULL), | |
1701 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1702 _evac_failure_scan_stack(NULL) , | |
1703 _mark_in_progress(false), | |
2152 | 1704 _cg1r(NULL), _summary_bytes_used(0), |
342 | 1705 _refine_cte_cl(NULL), |
1706 _full_collection(false), | |
2152 | 1707 _free_list("Master Free List"), |
1708 _secondary_free_list("Secondary Free List"), | |
1709 _humongous_set("Master Humongous Set"), | |
1710 _free_regions_coming(false), | |
342 | 1711 _young_list(new YoungList(this)), |
1712 _gc_time_stamp(0), | |
526 | 1713 _surviving_young_words(NULL), |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1714 _full_collections_completed(0), |
526 | 1715 _in_cset_fast_test(NULL), |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1716 _in_cset_fast_test_base(NULL), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1717 _dirty_cards_region_list(NULL) { |
342 | 1718 _g1h = this; // To catch bugs. |
1719 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1720 vm_exit_during_initialization("Failed necessary allocation."); | |
1721 } | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1722 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1723 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1724 |
342 | 1725 int n_queues = MAX2((int)ParallelGCThreads, 1); |
1726 _task_queues = new RefToScanQueueSet(n_queues); | |
1727 | |
1728 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1729 assert(n_rem_sets > 0, "Invariant."); | |
1730 | |
1731 HeapRegionRemSetIterator** iter_arr = | |
1732 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1733 for (int i = 0; i < n_queues; i++) { | |
1734 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1735 } | |
1736 _rem_set_iterator = iter_arr; | |
1737 | |
1738 for (int i = 0; i < n_queues; i++) { | |
1739 RefToScanQueue* q = new RefToScanQueue(); | |
1740 q->initialize(); | |
1741 _task_queues->register_queue(i, q); | |
1742 } | |
1743 | |
1744 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
636 | 1745 _gc_alloc_regions[ap] = NULL; |
1746 _gc_alloc_region_counts[ap] = 0; | |
1747 _retained_gc_alloc_regions[ap] = NULL; | |
1748 // by default, we do not retain a GC alloc region for each ap; | |
1749 // we'll override this, when appropriate, below | |
1750 _retain_gc_alloc_region[ap] = false; | |
1751 } | |
1752 | |
1753 // We will try to remember the last half-full tenured region we | |
1754 // allocated to at the end of a collection so that we can re-use it | |
1755 // during the next collection. | |
1756 _retain_gc_alloc_region[GCAllocForTenured] = true; | |
1757 | |
342 | 1758 guarantee(_task_queues != NULL, "task_queues allocation failure."); |
1759 } | |
1760 | |
1761 jint G1CollectedHeap::initialize() { | |
1166 | 1762 CollectedHeap::pre_initialize(); |
342 | 1763 os::enable_vtime(); |
1764 | |
1765 // Necessary to satisfy locking discipline assertions. | |
1766 | |
1767 MutexLocker x(Heap_lock); | |
1768 | |
1769 // While there are no constraints in the GC code that HeapWordSize | |
1770 // be any particular value, there are multiple other areas in the | |
1771 // system which believe this to be true (e.g. oop->object_size in some | |
1772 // cases incorrectly returns the size in wordSize units rather than | |
1773 // HeapWordSize). | |
1774 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1775 | |
1776 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1777 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1778 | |
1779 // Ensure that the sizes are properly aligned. | |
1780 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1781 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1782 | |
1783 _cg1r = new ConcurrentG1Refine(); | |
1784 | |
1785 // Reserve the maximum. | |
1786 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1787 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1788 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1789 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1790 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1791 |
342 | 1792 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1793 HeapRegion::GrainBytes, | |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1794 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1795 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1796 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1797 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1798 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1799 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1800 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1801 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1802 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1803 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1804 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1805 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1806 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1807 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1808 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1809 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1810 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1811 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1812 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1813 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1814 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1815 } |
342 | 1816 |
1817 if (!heap_rs.is_reserved()) { | |
1818 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1819 return JNI_ENOMEM; | |
1820 } | |
1821 | |
1822 // It is important to do this in a way such that concurrent readers can't | |
1823 // temporarily think somethings in the heap. (I've actually seen this | |
1824 // happen in asserts: DLD.) | |
1825 _reserved.set_word_size(0); | |
1826 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1827 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1828 | |
1829 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1830 | |
1831 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1832 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1833 set_barrier_set(rem_set()->bs()); | |
1834 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1835 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1836 } else { | |
1837 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1838 return JNI_ENOMEM; | |
1839 } | |
1840 | |
1841 // Also create a G1 rem set. | |
1861 | 1842 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
1843 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
342 | 1844 } else { |
1861 | 1845 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
1846 return JNI_ENOMEM; | |
342 | 1847 } |
1848 | |
1849 // Carve out the G1 part of the heap. | |
1850 | |
1851 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1852 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1853 g1_rs.size()/HeapWordSize); | |
1854 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1855 | |
1856 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1857 | |
1858 _g1_storage.initialize(g1_rs, 0); | |
1859 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1860 _g1_max_committed = _g1_committed; | |
393 | 1861 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1862 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1863 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1864 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1865 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1866 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1867 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1868 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1869 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1870 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1871 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1872 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1873 |
2152 | 1874 HeapRegionSet::set_unrealistically_long_length(max_regions() + 1); |
1875 | |
342 | 1876 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1877 heap_word_size(init_byte_size)); | |
1878 | |
1879 _g1h = this; | |
1880 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1881 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1882 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1883 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1884 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1885 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1886 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1887 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1888 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1889 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1890 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1891 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1892 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1893 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1894 |
342 | 1895 // Create the ConcurrentMark data structure and thread. |
1896 // (Must do this late, so that "max_regions" is defined.) | |
1897 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1898 _cmThread = _cm->cmThread(); | |
1899 | |
1900 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1901 HeapRegionRemSet::init_heap(max_regions()); | |
1902 | |
677 | 1903 // Now expand into the initial heap size. |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1904 if (!expand(init_byte_size)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1905 vm_exit_during_initialization("Failed to allocate initial heap."); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1906 return JNI_ENOMEM; |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1907 } |
342 | 1908 |
1909 // Perform any initialization actions delegated to the policy. | |
1910 g1_policy()->init(); | |
1911 | |
1912 g1_policy()->note_start_of_mark_thread(); | |
1913 | |
1914 _refine_cte_cl = | |
1915 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1916 g1_rem_set(), | |
1917 concurrent_g1_refine()); | |
1918 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1919 | |
1920 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1921 SATB_Q_FL_lock, | |
1111 | 1922 G1SATBProcessCompletedThreshold, |
342 | 1923 Shared_SATB_Q_lock); |
794 | 1924 |
1925 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1926 DirtyCardQ_FL_lock, | |
1111 | 1927 concurrent_g1_refine()->yellow_zone(), |
1928 concurrent_g1_refine()->red_zone(), | |
794 | 1929 Shared_DirtyCardQ_lock); |
1930 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1931 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1932 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1933 DirtyCardQ_FL_lock, |
1111 | 1934 -1, // never trigger processing |
1935 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1936 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1937 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1938 } |
1705 | 1939 |
1940 // Initialize the card queue set used to hold cards containing | |
1941 // references into the collection set. | |
1942 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, | |
1943 DirtyCardQ_FL_lock, | |
1944 -1, // never trigger processing | |
1945 -1, // no limit on length | |
1946 Shared_DirtyCardQ_lock, | |
1947 &JavaThread::dirty_card_queue_set()); | |
1948 | |
342 | 1949 // In case we're keeping closure specialization stats, initialize those |
1950 // counts and that mechanism. | |
1951 SpecializationStats::clear(); | |
1952 | |
1953 _gc_alloc_region_list = NULL; | |
1954 | |
1955 // Do later initialization work for concurrent refinement. | |
1956 _cg1r->init(); | |
1957 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1958 // Here we allocate the dummy full region that is required by the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1959 // G1AllocRegion class. If we don't pass an address in the reserved |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1960 // space here, lots of asserts fire. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1961 MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1962 HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1963 // We'll re-use the same region whether the alloc region will |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1964 // require BOT updates or not and, if it doesn't, then a non-young |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1965 // region will complain that it cannot support allocations without |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1966 // BOT updates. So we'll tag the dummy region as young to avoid that. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1967 dummy_region->set_young(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1968 // Make sure it's full. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1969 dummy_region->set_top(dummy_region->end()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1970 G1AllocRegion::setup(this, dummy_region); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1971 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1972 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1973 |
342 | 1974 return JNI_OK; |
1975 } | |
1976 | |
1977 void G1CollectedHeap::ref_processing_init() { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1978 // Reference processing in G1 currently works as follows: |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1979 // |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1980 // * There is only one reference processor instance that |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1981 // 'spans' the entire heap. It is created by the code |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1982 // below. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1983 // * Reference discovery is not enabled during an incremental |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1984 // pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1985 // * Discoverered refs are not enqueued nor are they processed |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1986 // during an incremental pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1987 // * Reference discovery is enabled at initial marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1988 // * Reference discovery is disabled and the discovered |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1989 // references processed etc during remarking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1990 // * Reference discovery is MT (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1991 // * Reference discovery requires a barrier (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1992 // * Reference processing is currently not MT (see 6608385). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1993 // * A full GC enables (non-MT) reference discovery and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1994 // processes any discovered references. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1995 |
342 | 1996 SharedHeap::ref_processing_init(); |
1997 MemRegion mr = reserved_region(); | |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
1998 _ref_processor = |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
1999 new ReferenceProcessor(mr, // span |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2000 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2001 (int) ParallelGCThreads, // degree of mt processing |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2002 ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2003 (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2004 false, // Reference discovery is not atomic |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2005 &_is_alive_closure, // is alive closure for efficiency |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2006 true); // Setting next fields of discovered |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2007 // lists requires a barrier. |
342 | 2008 } |
2009 | |
2010 size_t G1CollectedHeap::capacity() const { | |
2011 return _g1_committed.byte_size(); | |
2012 } | |
2013 | |
1705 | 2014 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, |
2015 DirtyCardQueue* into_cset_dcq, | |
2016 bool concurrent, | |
342 | 2017 int worker_i) { |
889 | 2018 // Clean cards in the hot card cache |
1705 | 2019 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); |
889 | 2020 |
342 | 2021 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
2022 int n_completed_buffers = 0; | |
1705 | 2023 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { |
342 | 2024 n_completed_buffers++; |
2025 } | |
2026 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
2027 (double) n_completed_buffers); | |
2028 dcqs.clear_n_completed_buffers(); | |
2029 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
2030 } | |
2031 | |
2032 | |
2033 // Computes the sum of the storage used by the various regions. | |
2034 | |
2035 size_t G1CollectedHeap::used() const { | |
862
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2036 assert(Heap_lock->owner() != NULL, |
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2037 "Should be owned on this thread's behalf."); |
342 | 2038 size_t result = _summary_bytes_used; |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2039 // Read only once in case it is set to NULL concurrently |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2040 HeapRegion* hr = _mutator_alloc_region.get(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2041 if (hr != NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2042 result += hr->used(); |
342 | 2043 return result; |
2044 } | |
2045 | |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2046 size_t G1CollectedHeap::used_unlocked() const { |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2047 size_t result = _summary_bytes_used; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2048 return result; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2049 } |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2050 |
342 | 2051 class SumUsedClosure: public HeapRegionClosure { |
2052 size_t _used; | |
2053 public: | |
2054 SumUsedClosure() : _used(0) {} | |
2055 bool doHeapRegion(HeapRegion* r) { | |
2056 if (!r->continuesHumongous()) { | |
2057 _used += r->used(); | |
2058 } | |
2059 return false; | |
2060 } | |
2061 size_t result() { return _used; } | |
2062 }; | |
2063 | |
2064 size_t G1CollectedHeap::recalculate_used() const { | |
2065 SumUsedClosure blk; | |
2066 _hrs->iterate(&blk); | |
2067 return blk.result(); | |
2068 } | |
2069 | |
2070 #ifndef PRODUCT | |
2071 class SumUsedRegionsClosure: public HeapRegionClosure { | |
2072 size_t _num; | |
2073 public: | |
677 | 2074 SumUsedRegionsClosure() : _num(0) {} |
342 | 2075 bool doHeapRegion(HeapRegion* r) { |
2076 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
2077 _num += 1; | |
2078 } | |
2079 return false; | |
2080 } | |
2081 size_t result() { return _num; } | |
2082 }; | |
2083 | |
2084 size_t G1CollectedHeap::recalculate_used_regions() const { | |
2085 SumUsedRegionsClosure blk; | |
2086 _hrs->iterate(&blk); | |
2087 return blk.result(); | |
2088 } | |
2089 #endif // PRODUCT | |
2090 | |
2091 size_t G1CollectedHeap::unsafe_max_alloc() { | |
2152 | 2092 if (free_regions() > 0) return HeapRegion::GrainBytes; |
342 | 2093 // otherwise, is there space in the current allocation region? |
2094 | |
2095 // We need to store the current allocation region in a local variable | |
2096 // here. The problem is that this method doesn't take any locks and | |
2097 // there may be other threads which overwrite the current allocation | |
2098 // region field. attempt_allocation(), for example, sets it to NULL | |
2099 // and this can happen *after* the NULL check here but before the call | |
2100 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
2101 // to be a problem in the optimized build, since the two loads of the | |
2102 // current allocation region field are optimized away. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2103 HeapRegion* hr = _mutator_alloc_region.get(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2104 if (hr == NULL) { |
342 | 2105 return 0; |
2106 } | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2107 return hr->free(); |
342 | 2108 } |
2109 | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2110 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2111 return |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2112 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2113 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2114 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2115 |
3285
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2116 #ifndef PRODUCT |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2117 void G1CollectedHeap::allocate_dummy_regions() { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2118 // Let's fill up most of the region |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2119 size_t word_size = HeapRegion::GrainWords - 1024; |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2120 // And as a result the region we'll allocate will be humongous. |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2121 guarantee(isHumongous(word_size), "sanity"); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2122 |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2123 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2124 // Let's use the existing mechanism for the allocation |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2125 HeapWord* dummy_obj = humongous_obj_allocate(word_size); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2126 if (dummy_obj != NULL) { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2127 MemRegion mr(dummy_obj, word_size); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2128 CollectedHeap::fill_with_object(mr); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2129 } else { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2130 // If we can't allocate once, we probably cannot allocate |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2131 // again. Let's get out of the loop. |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2132 break; |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2133 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2134 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2135 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2136 #endif // !PRODUCT |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2137 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2138 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) { |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2139 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2140 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2141 // We assume that if concurrent == true, then the caller is a |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2142 // concurrent thread that was joined the Suspendible Thread |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2143 // Set. If there's ever a cheap way to check this, we should add an |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2144 // assert here. |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2145 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2146 // We have already incremented _total_full_collections at the start |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2147 // of the GC, so total_full_collections() represents how many full |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2148 // collections have been started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2149 unsigned int full_collections_started = total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2150 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2151 // Given that this method is called at the end of a Full GC or of a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2152 // concurrent cycle, and those can be nested (i.e., a Full GC can |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2153 // interrupt a concurrent cycle), the number of full collections |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2154 // completed should be either one (in the case where there was no |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2155 // nesting) or two (when a Full GC interrupted a concurrent cycle) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2156 // behind the number of full collections started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2157 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2158 // This is the case for the inner caller, i.e. a Full GC. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2159 assert(concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2160 (full_collections_started == _full_collections_completed + 1) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2161 (full_collections_started == _full_collections_completed + 2), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2162 err_msg("for inner caller (Full GC): full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2163 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2164 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2165 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2166 // This is the case for the outer caller, i.e. the concurrent cycle. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2167 assert(!concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2168 (full_collections_started == _full_collections_completed + 1), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2169 err_msg("for outer caller (concurrent cycle): " |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2170 "full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2171 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2172 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2173 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2174 _full_collections_completed += 1; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2175 |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2176 // We need to clear the "in_progress" flag in the CM thread before |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2177 // we wake up any waiters (especially when ExplicitInvokesConcurrent |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2178 // is set) so that if a waiter requests another System.gc() it doesn't |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2179 // incorrectly see that a marking cyle is still in progress. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2180 if (concurrent) { |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2181 _cmThread->clear_in_progress(); |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2182 } |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2183 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2184 // This notify_all() will ensure that a thread that called |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2185 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2186 // and it's waiting for a full GC to finish will be woken up. It is |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2187 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2188 FullGCCount_lock->notify_all(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2189 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2190 |
342 | 2191 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
2152 | 2192 assert_at_safepoint(true /* should_be_vm_thread */); |
342 | 2193 GCCauseSetter gcs(this, cause); |
2194 switch (cause) { | |
2195 case GCCause::_heap_inspection: | |
2196 case GCCause::_heap_dump: { | |
2197 HandleMark hm; | |
2198 do_full_collection(false); // don't clear all soft refs | |
2199 break; | |
2200 } | |
2201 default: // XXX FIX ME | |
2202 ShouldNotReachHere(); // Unexpected use of this function | |
2203 } | |
2204 } | |
2205 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2206 void G1CollectedHeap::collect(GCCause::Cause cause) { |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2207 // The caller doesn't have the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2208 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2209 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2210 unsigned int gc_count_before; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2211 unsigned int full_gc_count_before; |
342 | 2212 { |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2213 MutexLocker ml(Heap_lock); |
1973 | 2214 |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2215 // Read the GC count while holding the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2216 gc_count_before = SharedHeap::heap()->total_collections(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2217 full_gc_count_before = SharedHeap::heap()->total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2218 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2219 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2220 if (should_do_concurrent_full_gc(cause)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2221 // Schedule an initial-mark evacuation pause that will start a |
1973 | 2222 // concurrent cycle. We're setting word_size to 0 which means that |
2223 // we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2224 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2225 0, /* word_size */ |
2226 true, /* should_initiate_conc_mark */ | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2227 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2228 cause); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2229 VMThread::execute(&op); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2230 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2231 if (cause == GCCause::_gc_locker |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2232 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2233 |
1973 | 2234 // Schedule a standard evacuation pause. We're setting word_size |
2235 // to 0 which means that we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2236 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2237 0, /* word_size */ |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2238 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2239 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2240 cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2241 VMThread::execute(&op); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2242 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2243 // Schedule a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2244 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2245 VMThread::execute(&op); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2246 } |
342 | 2247 } |
2248 } | |
2249 | |
2250 bool G1CollectedHeap::is_in(const void* p) const { | |
2251 if (_g1_committed.contains(p)) { | |
2252 HeapRegion* hr = _hrs->addr_to_region(p); | |
2253 return hr->is_in(p); | |
2254 } else { | |
2255 return _perm_gen->as_gen()->is_in(p); | |
2256 } | |
2257 } | |
2258 | |
2259 // Iteration functions. | |
2260 | |
2261 // Iterates an OopClosure over all ref-containing fields of objects | |
2262 // within a HeapRegion. | |
2263 | |
2264 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
2265 MemRegion _mr; | |
2266 OopClosure* _cl; | |
2267 public: | |
2268 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
2269 : _mr(mr), _cl(cl) {} | |
2270 bool doHeapRegion(HeapRegion* r) { | |
2271 if (! r->continuesHumongous()) { | |
2272 r->oop_iterate(_cl); | |
2273 } | |
2274 return false; | |
2275 } | |
2276 }; | |
2277 | |
678 | 2278 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 2279 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
2280 _hrs->iterate(&blk); | |
678 | 2281 if (do_perm) { |
2282 perm_gen()->oop_iterate(cl); | |
2283 } | |
342 | 2284 } |
2285 | |
678 | 2286 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 2287 IterateOopClosureRegionClosure blk(mr, cl); |
2288 _hrs->iterate(&blk); | |
678 | 2289 if (do_perm) { |
2290 perm_gen()->oop_iterate(cl); | |
2291 } | |
342 | 2292 } |
2293 | |
2294 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
2295 | |
2296 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
2297 ObjectClosure* _cl; | |
2298 public: | |
2299 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
2300 bool doHeapRegion(HeapRegion* r) { | |
2301 if (! r->continuesHumongous()) { | |
2302 r->object_iterate(_cl); | |
2303 } | |
2304 return false; | |
2305 } | |
2306 }; | |
2307 | |
678 | 2308 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 2309 IterateObjectClosureRegionClosure blk(cl); |
2310 _hrs->iterate(&blk); | |
678 | 2311 if (do_perm) { |
2312 perm_gen()->object_iterate(cl); | |
2313 } | |
342 | 2314 } |
2315 | |
// Not supported by G1: always fails with a guarantee(). The closure
// argument is intentionally unused.
void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  // FIXME: is this right?
  guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
}
2320 | |
2321 // Calls a SpaceClosure on a HeapRegion. | |
2322 | |
2323 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
2324 SpaceClosure* _cl; | |
2325 public: | |
2326 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
2327 bool doHeapRegion(HeapRegion* r) { | |
2328 _cl->do_space(r); | |
2329 return false; | |
2330 } | |
2331 }; | |
2332 | |
2333 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
2334 SpaceClosureRegionClosure blk(cl); | |
2335 _hrs->iterate(&blk); | |
2336 } | |
2337 | |
// Applies "cl" to every heap region, in sequence order.
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  _hrs->iterate(cl);
}
2341 | |
// Applies "cl" to every heap region, starting the iteration at region "r".
void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
                                               HeapRegionClosure* cl) {
  _hrs->iterate_from(r, cl);
}
2346 | |
// Applies "cl" to every heap region, starting the iteration at index "idx".
void
G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  _hrs->iterate_from(idx, cl);
}
2351 | |
// Returns the heap region at index "idx" in the region sequence.
HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
2353 | |
// Parallel region iteration: each worker starts at a different offset
// into the region table (to spread the workers out) but scans the full
// table; regions are handed out via a claim-value CAS so that each
// unclaimed region is processed by exactly one worker. "Continues
// humongous" regions are processed before their "starts humongous"
// region -- see the ordering comment below.
void
G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                 int worker,
                                                 jint claim_value) {
  const size_t regions = n_regions();
  const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
  // try to spread out the starting points of the workers
  const size_t start_index = regions / worker_num * (size_t) worker;

  // each worker will actually look at all regions
  for (size_t count = 0; count < regions; ++count) {
    const size_t index = (start_index + count) % regions;
    assert(0 <= index && index < regions, "sanity");
    HeapRegion* r = region_at(index);
    // we'll ignore "continues humongous" regions (we'll process them
    // when we come across their corresponding "start humongous"
    // region) and regions already claimed
    if (r->claim_value() == claim_value || r->continuesHumongous()) {
      continue;
    }
    // OK, try to claim it
    if (r->claimHeapRegion(claim_value)) {
      // success!
      assert(!r->continuesHumongous(), "sanity");
      if (r->startsHumongous()) {
        // If the region is "starts humongous" we'll iterate over its
        // "continues humongous" regions first; in fact we'll do them
        // first. The order is important. In one case, calling the
        // closure on the "starts humongous" region might de-allocate
        // and clear all its "continues humongous" regions and, as a
        // result, we might end up processing them twice. So, we'll do
        // them first (notice: most closures will ignore them anyway) and
        // then we'll do the "starts humongous" region.
        for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
          HeapRegion* chr = region_at(ch_index);

          // if the region has already been claimed or it's not
          // "continues humongous" we're done
          if (chr->claim_value() == claim_value ||
              !chr->continuesHumongous()) {
            break;
          }

          // No one should have claimed it directly, given that we
          // claimed its "starts humongous" region.
          assert(chr->claim_value() != claim_value, "sanity");
          assert(chr->humongous_start_region() == r, "sanity");

          if (chr->claimHeapRegion(claim_value)) {
            // we should always be able to claim it; no one else should
            // be trying to claim this region

            bool res2 = cl->doHeapRegion(chr);
            assert(!res2, "Should not abort");

            // Right now, this holds (i.e., no closure that actually
            // does something with "continues humongous" regions
            // clears them). We might have to weaken it in the future,
            // but let's leave these two asserts here for extra safety.
            assert(chr->continuesHumongous(), "should still be the case");
            assert(chr->humongous_start_region() == r, "sanity");
          } else {
            guarantee(false, "we should not reach here");
          }
        }
      }

      assert(!r->continuesHumongous(), "sanity");
      bool res = cl->doHeapRegion(r);
      assert(!res, "Should not abort");
    }
  }
}
2427 | |
390 | 2428 class ResetClaimValuesClosure: public HeapRegionClosure { |
2429 public: | |
2430 bool doHeapRegion(HeapRegion* r) { | |
2431 r->set_claim_value(HeapRegion::InitialClaimValue); | |
2432 return false; | |
2433 } | |
2434 }; | |
2435 | |
2436 void | |
2437 G1CollectedHeap::reset_heap_region_claim_values() { | |
2438 ResetClaimValuesClosure blk; | |
2439 heap_region_iterate(&blk); | |
2440 } | |
2441 | |
355 | 2442 #ifdef ASSERT |
// Checks that every region in the heap carries the expected claim
// value. It also piggy-backs a check that the humongous_start_region()
// information on "continues humongous"
// regions is correct.
2447 | |
class CheckClaimValuesClosure : public HeapRegionClosure {
private:
  jint _claim_value;       // the value every region is expected to carry
  size_t _failures;        // number of regions failing either check
  HeapRegion* _sh_region;  // most recent "starts humongous" region seen
public:
  CheckClaimValuesClosure(jint claim_value) :
    _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->claim_value() != _claim_value) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                             "claim value = %d, should be %d",
                             r->bottom(), r->end(), r->claim_value(),
                             _claim_value);
      ++_failures;
    }
    if (!r->isHumongous()) {
      _sh_region = NULL;
    } else if (r->startsHumongous()) {
      _sh_region = r;
    } else if (r->continuesHumongous()) {
      // A "continues humongous" region must point back at the most
      // recently seen "starts humongous" region.
      if (r->humongous_start_region() != _sh_region) {
        gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                               "HS = "PTR_FORMAT", should be "PTR_FORMAT,
                               r->bottom(), r->end(),
                               r->humongous_start_region(),
                               _sh_region);
        ++_failures;
      }
    }
    return false;
  }
  size_t failures() {
    return _failures;
  }
};
2484 | |
2485 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
2486 CheckClaimValuesClosure cl(claim_value); | |
2487 heap_region_iterate(&cl); | |
2488 return cl.failures() == 0; | |
2489 } | |
2490 #endif // ASSERT | |
342 | 2491 |
2492 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2493 HeapRegion* r = g1_policy()->collection_set(); | |
2494 while (r != NULL) { | |
2495 HeapRegion* next = r->next_in_collection_set(); | |
2496 if (cl->doHeapRegion(r)) { | |
2497 cl->incomplete(); | |
2498 return; | |
2499 } | |
2500 r = next; | |
2501 } | |
2502 } | |
2503 | |
// Applies "cl" to every region in the collection set, starting at region
// "r" and wrapping around to the head of the CSet list, so each region
// is still visited exactly once.
void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                  HeapRegionClosure *cl) {
  if (r == NULL) {
    // The CSet is empty so there's nothing to do.
    return;
  }

  assert(r->in_collection_set(),
         "Start region must be a member of the collection set.");
  HeapRegion* cur = r;
  // First leg: from "r" to the end of the CSet list.
  while (cur != NULL) {
    // Grab the successor first; the closure may modify "cur".
    HeapRegion* next = cur->next_in_collection_set();
    // NOTE(review): the "&& false" makes the abort branch dead code, so
    // closure abort requests are silently ignored here -- presumably
    // deliberate, but confirm before "fixing" it.
    if (cl->doHeapRegion(cur) && false) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
  // Second leg: from the head of the CSet up to (but excluding) "r".
  cur = g1_policy()->collection_set();
  while (cur != r) {
    HeapRegion* next = cur->next_in_collection_set();
    if (cl->doHeapRegion(cur) && false) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
}
2532 | |
// Returns the first heap region (viewed as a CompactibleSpace), or NULL
// if the heap currently contains no regions.
CompactibleSpace* G1CollectedHeap::first_compactible_space() {
  return _hrs->length() > 0 ? _hrs->at(0) : NULL;
}
2536 | |
2537 | |
2538 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2539 Space* res = heap_region_containing(addr); | |
2540 if (res == NULL) | |
2541 res = perm_gen()->space_containing(addr); | |
2542 return res; | |
2543 } | |
2544 | |
2545 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2546 Space* sp = space_containing(addr); | |
2547 if (sp != NULL) { | |
2548 return sp->block_start(addr); | |
2549 } | |
2550 return NULL; | |
2551 } | |
2552 | |
// Returns the size (in HeapWords) of the block containing "addr"; the
// address must be inside the heap (debug-checked).
size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  Space* sp = space_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}
2558 | |
2559 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2560 Space* sp = space_containing(addr); | |
2561 return sp->block_is_obj(addr); | |
2562 } | |
2563 | |
// G1 supports thread-local allocation buffers.
bool G1CollectedHeap::supports_tlab_allocation() const {
  return true;
}
2567 | |
// Reports the TLAB capacity as one region's worth of bytes; the thread
// argument is intentionally unused.
size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  return HeapRegion::GrainBytes;
}
2571 | |
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  // Return the remaining space in the cur alloc region, but not less than
  // the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = _mutator_alloc_region.get();
  size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
  if (hr == NULL) {
    // No current mutator alloc region: a fresh TLAB may be as large as
    // the humongous threshold allows.
    return max_tlab_size;
  } else {
    // Clamp the region's free space to [MinTLABSize, max_tlab_size].
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
  }
}
2588 | |
// Element limit for "large" typearrays: one region's worth of words.
// The FIXME marks this as a provisional choice.
size_t G1CollectedHeap::large_typearray_limit() {
  // FIXME
  return HeapRegion::GrainBytes/HeapWordSize;
}
2593 | |
// Maximum heap capacity: the size of the whole reserved G1 space,
// whether or not all of it is currently committed.
size_t G1CollectedHeap::max_capacity() const {
  return _g1_reserved.byte_size();
}
2597 | |
// Not implemented for G1: always reports 0 ms (the commented-out assert
// marks this as not-yet-implemented rather than a deliberate contract).
jlong G1CollectedHeap::millis_since_last_gc() {
  // assert(false, "NYI");
  return 0;
}
2602 | |
2603 void G1CollectedHeap::prepare_for_verify() { | |
2604 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2605 ensure_parsability(false); | |
2606 } | |
2607 g1_rem_set()->prepare_for_verify(); | |
2608 } | |
2609 | |
2610 class VerifyLivenessOopClosure: public OopClosure { | |
2611 G1CollectedHeap* g1h; | |
2612 public: | |
2613 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
2614 g1h = _g1h; | |
2615 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2616 void do_oop(narrowOop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2617 void do_oop( oop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2618 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2619 template <class T> void do_oop_work(T *p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2620 oop obj = oopDesc::load_decode_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2621 guarantee(obj == NULL || !g1h->is_obj_dead(obj), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2622 "Dead object referenced by a not dead object"); |
342 | 2623 } |
2624 }; | |
2625 | |
// Verifies the objects in one heap region: every reference out of each
// live object must point to a live (or NULL) object. Also accumulates
// the live bytes so callers can cross-check the region's recorded
// max_live_bytes().
class VerifyObjsInRegionClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  size_t _live_bytes;      // bytes of live objects counted in _hr
  HeapRegion *_hr;
  bool _use_prev_marking;  // which marking information defines liveness
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
    : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
    _g1h = G1CollectedHeap::heap();
  }
  void do_object(oop o) {
    VerifyLivenessOopClosure isLive(_g1h);
    assert(o != NULL, "Huh?");
    if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
      // Walk the object's fields; the closure guarantees no referent
      // is dead.
      o->oop_iterate(&isLive);
      // Only objects present at the previous marking count toward the
      // region's live-bytes figure.
      if (!_hr->obj_allocated_since_prev_marking(o)) {
        size_t obj_size = o->size(); // Make sure we don't overflow
        _live_bytes += (obj_size * HeapWordSize);
      }
    }
  }
  size_t live_bytes() { return _live_bytes; }
};
2652 | |
2653 class PrintObjsInRegionClosure : public ObjectClosure { | |
2654 HeapRegion *_hr; | |
2655 G1CollectedHeap *_g1; | |
2656 public: | |
2657 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2658 _g1 = G1CollectedHeap::heap(); | |
2659 }; | |
2660 | |
2661 void do_object(oop o) { | |
2662 if (o != NULL) { | |
2663 HeapWord *start = (HeapWord *) o; | |
2664 size_t word_sz = o->size(); | |
2665 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2666 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2667 (void*) o, word_sz, | |
2668 _g1->isMarkedPrev(o), | |
2669 _g1->isMarkedNext(o), | |
2670 _hr->obj_allocated_since_prev_marking(o)); | |
2671 HeapWord *end = start + word_sz; | |
2672 HeapWord *cur; | |
2673 int *val; | |
2674 for (cur = start; cur < end; cur++) { | |
2675 val = (int *) cur; | |
2676 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2677 } | |
2678 } | |
2679 } | |
2680 }; | |
2681 | |
// Verifies one heap region ("continues humongous" regions are skipped)
// and latches whether any region failed. After a region verifies, it
// cross-checks the region's recorded max_live_bytes() against the live
// bytes actually found by walking the objects.
class VerifyRegionClosure: public HeapRegionClosure {
private:
  bool _allow_dirty;
  bool _par;               // true when driven by the parallel verify task
  bool _use_prev_marking;  // which marking information defines liveness
  bool _failures;          // latched: any region failed verification
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
    : _allow_dirty(allow_dirty),
      _par(par),
      _use_prev_marking(use_prev_marking),
      _failures(false) {}

  bool failures() {
    return _failures;
  }

  bool doHeapRegion(HeapRegion* r) {
    // Outside the parallel task, regions must still be unclaimed.
    guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
              "Should be unclaimed at verify points.");
    if (!r->continuesHumongous()) {
      bool failures = false;
      r->verify(_allow_dirty, _use_prev_marking, &failures);
      if (failures) {
        _failures = true;
      } else {
        // The region itself verified; also check that its recorded max
        // live bytes is not smaller than what walking the objects finds.
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
        r->object_iterate(&not_dead_yet_cl);
        if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
          gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
                                 "max_live_bytes "SIZE_FORMAT" "
                                 "< calculated "SIZE_FORMAT,
                                 r->bottom(), r->end(),
                                 r->max_live_bytes(),
                                 not_dead_yet_cl.live_bytes());
          _failures = true;
        }
      }
    }
    // Failures are recorded in _failures; the iteration itself always
    // continues (returning true would abort it).
    return false;
  }
};
2726 | |
// Root verification: any non-NULL heap oop reachable from a root must
// not be dead according to the chosen marking information.
class VerifyRootsClosure: public OopsInGenClosure {
private:
  G1CollectedHeap* _g1h;
  bool _use_prev_marking;  // which marking information defines liveness
  bool _failures;          // latched: some root pointed at a dead object
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyRootsClosure(bool use_prev_marking) :
    _g1h(G1CollectedHeap::heap()),
    _use_prev_marking(use_prev_marking),
    _failures(false) { }

  bool failures() { return _failures; }

  template <class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
        gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
        obj->print_on(gclog_or_tty);
        _failures = true;
      }
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};
2758 | |
// This is the task used for parallel heap verification. Each worker
// claims regions via heap_region_par_iterate_chunked() using the
// ParVerifyClaimValue, verifies them with a VerifyRegionClosure, and
// reports failures back through this task.
class G1ParVerifyTask: public AbstractGangTask {
private:
  G1CollectedHeap* _g1h;
  bool _allow_dirty;
  bool _use_prev_marking;  // which marking information defines liveness
  bool _failures;          // latched: any worker saw a failing region

public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
                  bool use_prev_marking) :
    AbstractGangTask("Parallel verify task"),
    _g1h(g1h),
    _allow_dirty(allow_dirty),
    _use_prev_marking(use_prev_marking),
    _failures(false) { }

  bool failures() {
    return _failures;
  }

  void work(int worker_i) {
    HandleMark hm;
    VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
    _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
                                          HeapRegion::ParVerifyClaimValue);
    if (blk.failures()) {
      _failures = true;
    }
  }
};
2793 | |
// Convenience overload: verify using the "prev" marking information.
void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
  verify(allow_dirty, silent, /* use_prev_marking */ true);
}
2797 | |
2798 void G1CollectedHeap::verify(bool allow_dirty, | |
2799 bool silent, | |
2800 bool use_prev_marking) { | |
342 | 2801 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
2802 if (!silent) { gclog_or_tty->print("roots "); } | |
811 | 2803 VerifyRootsClosure rootsCl(use_prev_marking); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2804 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2805 process_strong_roots(true, // activate StrongRootsScope |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2806 false, |
342 | 2807 SharedHeap::SO_AllClasses, |
2808 &rootsCl, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2809 &blobsCl, |
342 | 2810 &rootsCl); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2811 bool failures = rootsCl.failures(); |
342 | 2812 rem_set()->invalidate(perm_gen()->used_region(), false); |
2152 | 2813 if (!silent) { gclog_or_tty->print("HeapRegionSets "); } |
2814 verify_region_sets(); | |
2815 if (!silent) { gclog_or_tty->print("HeapRegions "); } | |
390 | 2816 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2817 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2818 "sanity check"); | |
2819 | |
811 | 2820 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
390 | 2821 int n_workers = workers()->total_workers(); |
2822 set_par_threads(n_workers); | |
2823 workers()->run_task(&task); | |
2824 set_par_threads(0); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2825 if (task.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2826 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2827 } |
390 | 2828 |
2829 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2830 "sanity check"); | |
2831 | |
2832 reset_heap_region_claim_values(); | |
2833 | |
2834 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2835 "sanity check"); | |
2836 } else { | |
811 | 2837 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
390 | 2838 _hrs->iterate(&blk); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2839 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2840 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2841 } |
390 | 2842 } |
2152 | 2843 if (!silent) gclog_or_tty->print("RemSet "); |
342 | 2844 rem_set()->verify(); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2845 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2846 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2847 gclog_or_tty->print_cr("Heap:"); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2848 print_on(gclog_or_tty, true /* extended */); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2849 gclog_or_tty->print_cr(""); |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2850 #ifndef PRODUCT |
1044 | 2851 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
1388 | 2852 concurrent_mark()->print_reachable("at-verification-failure", |
2853 use_prev_marking, false /* all */); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2854 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2855 #endif |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2856 gclog_or_tty->flush(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2857 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2858 guarantee(!failures, "there should not have been any failures"); |
342 | 2859 } else { |
2860 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2861 } | |
2862 } | |
2863 | |
2864 class PrintRegionClosure: public HeapRegionClosure { | |
2865 outputStream* _st; | |
2866 public: | |
2867 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2868 bool doHeapRegion(HeapRegion* r) { | |
2869 r->print_on(_st); | |
2870 return false; | |
2871 } | |
2872 }; | |
2873 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2874 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 2875 |
2876 void G1CollectedHeap::print_on(outputStream* st) const { | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2877 print_on(st, PrintHeapAtGCExtended); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2878 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2879 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2880 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2881 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2882 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2883 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2884 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2885 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2886 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2887 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2888 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2889 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2890 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2891 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2892 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2893 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2894 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2895 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2896 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2897 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2898 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2899 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2900 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2901 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2902 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2903 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2904 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2905 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 2906 PrintRegionClosure blk(st); |
2907 _hrs->iterate(&blk); | |
2908 } | |
2909 | |
2910 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2911 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1019 | 2912 workers()->print_worker_threads_on(st); |
2913 } | |
2914 _cmThread->print_on(st); | |
342 | 2915 st->cr(); |
1019 | 2916 _cm->print_worker_threads_on(st); |
2917 _cg1r->print_worker_threads_on(st); | |
342 | 2918 st->cr(); |
2919 } | |
2920 | |
2921 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2922 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 2923 workers()->threads_do(tc); |
2924 } | |
2925 tc->do_thread(_cmThread); | |
794 | 2926 _cg1r->threads_do(tc); |
342 | 2927 } |
2928 | |
2929 void G1CollectedHeap::print_tracing_info() const { | |
2930 // We'll overload this to mean "trace GC pause statistics." | |
2931 if (TraceGen0Time || TraceGen1Time) { | |
2932 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2933 // to that. | |
2934 g1_policy()->print_tracing_info(); | |
2935 } | |
751 | 2936 if (G1SummarizeRSetStats) { |
342 | 2937 g1_rem_set()->print_summary_info(); |
2938 } | |
1282 | 2939 if (G1SummarizeConcMark) { |
342 | 2940 concurrent_mark()->print_summary_info(); |
2941 } | |
2942 g1_policy()->print_yg_surv_rate_info(); | |
2943 SpecializationStats::print(); | |
2944 } | |
2945 | |
2946 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2947 HeapRegion* hr = heap_region_containing(addr); | |
2948 if (hr == NULL) { | |
2949 return 0; | |
2950 } else { | |
2951 return 1; | |
2952 } | |
2953 } | |
2954 | |
2955 G1CollectedHeap* G1CollectedHeap::heap() { | |
2956 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
2957 "not a garbage-first heap"); | |
2958 return _g1h; | |
2959 } | |
2960 | |
2961 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
2962 // always_do_update_barrier = false; |
342 | 2963 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
2964 // Call allocation profiler | |
2965 AllocationProfiler::iterate_since_last_gc(); | |
2966 // Fill TLAB's and such | |
2967 ensure_parsability(true); | |
2968 } | |
2969 | |
2970 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
2971 // FIXME: what is this about? | |
2972 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
2973 // is set. | |
2974 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
2975 "derived pointer present")); | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
2976 // always_do_update_barrier = true; |
342 | 2977 } |
2978 | |
1973 | 2979 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, |
2980 unsigned int gc_count_before, | |
2981 bool* succeeded) { | |
2982 assert_heap_not_locked_and_not_at_safepoint(); | |
342 | 2983 g1_policy()->record_stop_world_start(); |
1973 | 2984 VM_G1IncCollectionPause op(gc_count_before, |
2985 word_size, | |
2986 false, /* should_initiate_conc_mark */ | |
2987 g1_policy()->max_pause_time_ms(), | |
2988 GCCause::_g1_inc_collection_pause); | |
2989 VMThread::execute(&op); | |
2990 | |
2991 HeapWord* result = op.result(); | |
2992 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded(); | |
2993 assert(result == NULL || ret_succeeded, | |
2994 "the result should be NULL if the VM did not succeed"); | |
2995 *succeeded = ret_succeeded; | |
2996 | |
2997 assert_heap_not_locked(); | |
2998 return result; | |
342 | 2999 } |
3000 | |
3001 void | |
3002 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3003 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3004 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3005 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3006 CGC_lock->notify(); |
342 | 3007 } |
3008 } | |
3009 | |
3010 class VerifyMarkedObjsClosure: public ObjectClosure { | |
3011 G1CollectedHeap* _g1h; | |
3012 public: | |
3013 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
3014 void do_object(oop obj) { | |
3015 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
3016 "markandsweep mark should agree with concurrent deadness"); | |
3017 } | |
3018 }; | |
3019 | |
3020 void | |
3021 G1CollectedHeap::checkConcurrentMark() { | |
3022 VerifyMarkedObjsClosure verifycl(this); | |
3023 // MutexLockerEx x(getMarkBitMapLock(), | |
3024 // Mutex::_no_safepoint_check_flag); | |
678 | 3025 object_iterate(&verifycl, false); |
342 | 3026 } |
3027 | |
3028 void G1CollectedHeap::do_sync_mark() { | |
3029 _cm->checkpointRootsInitial(); | |
3030 _cm->markFromRoots(); | |
3031 _cm->checkpointRootsFinal(false); | |
3032 } | |
3033 | |
3034 // <NEW PREDICTION> | |
3035 | |
3036 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
3037 bool young) { | |
3038 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
3039 } | |
3040 | |
3041 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
3042 predicted_time_ms) { | |
3043 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
3044 } | |
3045 | |
3046 size_t G1CollectedHeap::pending_card_num() { | |
3047 size_t extra_cards = 0; | |
3048 JavaThread *curr = Threads::first(); | |
3049 while (curr != NULL) { | |
3050 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
3051 extra_cards += dcq.size(); | |
3052 curr = curr->next(); | |
3053 } | |
3054 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3055 size_t buffer_size = dcqs.buffer_size(); | |
3056 size_t buffer_num = dcqs.completed_buffers_num(); | |
3057 return buffer_size * buffer_num + extra_cards; | |
3058 } | |
3059 | |
3060 size_t G1CollectedHeap::max_pending_card_num() { | |
3061 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3062 size_t buffer_size = dcqs.buffer_size(); | |
3063 size_t buffer_num = dcqs.completed_buffers_num(); | |
3064 int thread_num = Threads::number_of_threads(); | |
3065 return (buffer_num + thread_num) * buffer_size; | |
3066 } | |
3067 | |
3068 size_t G1CollectedHeap::cards_scanned() { | |
1861 | 3069 return g1_rem_set()->cardsScanned(); |
342 | 3070 } |
3071 | |
3072 void | |
3073 G1CollectedHeap::setup_surviving_young_words() { | |
3074 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
3075 size_t array_length = g1_policy()->young_cset_length(); | |
3076 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3077 if (_surviving_young_words == NULL) { | |
3078 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
3079 "Not enough space for young surv words summary."); | |
3080 } | |
3081 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3082 #ifdef ASSERT |
342 | 3083 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3084 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3085 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3086 #endif // !ASSERT |
342 | 3087 } |
3088 | |
3089 void | |
3090 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
3091 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3092 size_t array_length = g1_policy()->young_cset_length(); | |
3093 for (size_t i = 0; i < array_length; ++i) | |
3094 _surviving_young_words[i] += surv_young_words[i]; | |
3095 } | |
3096 | |
3097 void | |
3098 G1CollectedHeap::cleanup_surviving_young_words() { | |
3099 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
3100 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
3101 _surviving_young_words = NULL; | |
3102 } | |
3103 | |
3104 // </NEW PREDICTION> | |
3105 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3106 struct PrepareForRSScanningClosure : public HeapRegionClosure { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3107 bool doHeapRegion(HeapRegion *r) { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3108 r->rem_set()->set_iter_claimed(0); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3109 return false; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3110 } |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3111 }; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3112 |
1709 | 3113 #if TASKQUEUE_STATS |
3114 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { | |
3115 st->print_raw_cr("GC Task Stats"); | |
3116 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); | |
3117 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); | |
3118 } | |
3119 | |
3120 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { | |
3121 print_taskqueue_stats_hdr(st); | |
3122 | |
3123 TaskQueueStats totals; | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3124 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3125 for (int i = 0; i < n; ++i) { |
3126 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); | |
3127 totals += task_queue(i)->stats; | |
3128 } | |
3129 st->print_raw("tot "); totals.print(st); st->cr(); | |
3130 | |
3131 DEBUG_ONLY(totals.verify()); | |
3132 } | |
3133 | |
3134 void G1CollectedHeap::reset_taskqueue_stats() { | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3135 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3136 for (int i = 0; i < n; ++i) { |
3137 task_queue(i)->stats.reset(); | |
3138 } | |
3139 } | |
3140 #endif // TASKQUEUE_STATS | |
3141 | |
1973 | 3142 bool |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3143 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
2152 | 3144 assert_at_safepoint(true /* should_be_vm_thread */); |
3145 guarantee(!is_gc_active(), "collection is not reentrant"); | |
3146 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3147 if (GC_locker::check_active_before_gc()) { |
1973 | 3148 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3149 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3150 |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2039
diff
changeset
|
3151 SvcGCMarker sgcm(SvcGCMarker::MINOR); |
2039
7c5250dbd584
6896624: G1: hotspot:::gc and hotspot:::mem-pool-gc probes are not fired
tonyp
parents:
2038
diff
changeset
|
3152 ResourceMark rm; |
7c5250dbd584
6896624: G1: hotspot:::gc and hotspot:::mem-pool-gc probes are not fired
tonyp
parents:
2038
diff
changeset
|
3153 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3154 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3155 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3156 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3157 |
2152 | 3158 verify_region_sets_optional(); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3159 verify_dirty_young_regions(); |
2152 | 3160 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3161 { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3162 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3163 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3164 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3165 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3166 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3167 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3168 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3169 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3170 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3171 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3172 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3173 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3174 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3175 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3176 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3177 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3178 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3179 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3180 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3181 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3182 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3183 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3184 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3185 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3186 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3187 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3188 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3189 TraceMemoryManagerStats tms(false /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3190 |
2361 | 3191 // If the secondary_free_list is not empty, append it to the |
3192 // free_list. No need to wait for the cleanup operation to finish; | |
3193 // the region allocation code will check the secondary_free_list | |
3194 // and wait if necessary. If the G1StressConcRegionFreeing flag is | |
3195 // set, skip this step so that the region allocation code has to | |
3196 // get entries from the secondary_free_list. | |
2152 | 3197 if (!G1StressConcRegionFreeing) { |
2361 | 3198 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 3199 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3200 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3201 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3202 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3203 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3204 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3205 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3206 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3207 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3208 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3209 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3210 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3211 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3212 increment_total_collections(false /* full gc */); |
342 | 3213 |
3214 #if G1_REM_SET_LOGGING | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3215 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3216 print(); |
342 | 3217 #endif |
3218 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3219 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3220 HandleMark hm; // Discard invalid handles created during verification |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3221 gclog_or_tty->print(" VerifyBeforeGC:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3222 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3223 Universe::verify(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3224 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3225 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3226 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3227 |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3228 // Please see comment in G1CollectedHeap::ref_processing_init() |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3229 // to see how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3230 // |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3231 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3232 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3233 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3234 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3235 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3236 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3237 // of the collection set!). |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3238 release_mutator_alloc_region(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3239 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3240 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3241 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3242 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3243 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3244 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3245 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3246 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3247 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3248 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3249 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3250 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3251 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3252 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3253 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3254 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3255 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3256 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3257 #endif // YOUNG_LIST_VERBOSE |
342 | 3258 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3259 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3260 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3261 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3262 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3263 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3264 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3265 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3266 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3267 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3268 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3269 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3270 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3271 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3272 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3273 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3274 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3275 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3276 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3277 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3278 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3279 if (mark_in_progress()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3280 concurrent_mark()->newCSet(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3281 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3282 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3283 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3284 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3285 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3286 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3287 |
1707 | 3288 g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3289 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3290 // Nothing to do if we were unable to choose a collection set. |
342 | 3291 #if G1_REM_SET_LOGGING |
1707 | 3292 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
3293 print(); | |
342 | 3294 #endif |
1707 | 3295 PrepareForRSScanningClosure prepare_for_rs_scan; |
3296 collection_set_iterate(&prepare_for_rs_scan); | |
3297 | |
3298 setup_surviving_young_words(); | |
3299 | |
3300 // Set up the gc allocation regions. | |
3301 get_gc_alloc_regions(); | |
3302 | |
3303 // Actually do the work... | |
3304 evacuate_collection_set(); | |
3305 | |
3306 free_collection_set(g1_policy()->collection_set()); | |
3307 g1_policy()->clear_collection_set(); | |
3308 | |
3309 cleanup_surviving_young_words(); | |
3310 | |
3311 // Start a new incremental collection set for the next pause. | |
3312 g1_policy()->start_incremental_cset_building(); | |
3313 | |
3314 // Clear the _cset_fast_test bitmap in anticipation of adding | |
3315 // regions to the incremental collection set for the next | |
3316 // evacuation pause. | |
3317 clear_cset_fast_test(); | |
3318 | |
3319 if (g1_policy()->in_young_gc_mode()) { | |
3320 _young_list->reset_sampled_info(); | |
3321 | |
3322 // Don't check the whole heap at this point as the | |
3323 // GC alloc regions from this pause have been tagged | |
3324 // as survivors and moved on to the survivor list. | |
3325 // Survivor regions will fail the !is_young() check. | |
3326 assert(check_young_list_empty(false /* check_heap */), | |
3327 "young list should be empty"); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3328 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3329 #if YOUNG_LIST_VERBOSE |
1707 | 3330 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
3331 _young_list->print(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3332 #endif // YOUNG_LIST_VERBOSE |
342 | 3333 |
1707 | 3334 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3335 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3336 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3337 |
1707 | 3338 _young_list->reset_auxilary_lists(); |
342 | 3339 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3340 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3341 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3342 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3343 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3344 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3345 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3346 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3347 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3348 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3349 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3350 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3351 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3352 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3353 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3354 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3355 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3356 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3357 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3358 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3359 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3360 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3361 } |
342 | 3362 |
3285
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
3363 allocate_dummy_regions(); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
3364 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3365 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3366 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3367 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3368 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3369 #endif // YOUNG_LIST_VERBOSE |
342 | 3370 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3371 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3372 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3373 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3374 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3375 g1_policy()->record_pause_time_ms(pause_time_ms); |
1707 | 3376 g1_policy()->record_collection_pause_end(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3377 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3378 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3379 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3380 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3381 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3382 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3383 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3384 Universe::verify(false); |
342 | 3385 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3386 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3387 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3388 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3389 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3390 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3391 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3392 size_t bytes_before = capacity(); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3393 if (!expand(expand_bytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3394 // We failed to expand the heap so let's verify that |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3395 // committed/uncommitted amount match the backing store |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3396 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3397 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3398 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3399 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3400 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3401 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3402 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3403 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3404 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3405 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3406 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3407 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3408 #endif |
342 | 3409 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3410 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3411 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3412 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3413 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3414 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3415 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3416 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3417 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3418 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3419 |
2152 | 3420 verify_region_sets_optional(); |
3421 | |
1709 | 3422 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
3423 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | |
3424 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3425 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3426 Universe::print_heap_after_gc(); |
342 | 3427 } |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3428 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3429 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3430 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3431 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3432 } |
1973 | 3433 |
3434 return true; | |
342 | 3435 } |
3436 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3437 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3438 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3439 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3440 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3441 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3442 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3443 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3444 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3445 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3446 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3447 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3448 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3449 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3450 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3451 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3452 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3453 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3454 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
// Activate the mutator alloc region, e.g. at the end of a GC pause
// when the mutators are about to resume (see the call after the
// evacuation work in do_collection_pause_at_safepoint()). The slot
// must be empty; release_mutator_alloc_region() is the matching
// teardown.
void G1CollectedHeap::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3459 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
// Retire the current mutator alloc region, e.g. at the start of a GC
// pause (see the call near the top of the pause code, before the
// collection set may claim that region). Afterwards no mutator alloc
// region is set; init_mutator_alloc_region() re-establishes one.
void G1CollectedHeap::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3464 |
342 | 3465 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
3466 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 3467 // make sure we don't call set_gc_alloc_region() multiple times on |
3468 // the same region | |
3469 assert(r == NULL || !r->is_gc_alloc_region(), | |
3470 "shouldn't already be a GC alloc region"); | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3471 assert(r == NULL || !r->isHumongous(), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3472 "humongous regions shouldn't be used as GC alloc regions"); |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3473 |
342 | 3474 HeapWord* original_top = NULL; |
3475 if (r != NULL) | |
3476 original_top = r->top(); | |
3477 | |
3478 // We will want to record the used space in r as being there before gc. | |
3479 // One we install it as a GC alloc region it's eligible for allocation. | |
3480 // So record it now and use it later. | |
3481 size_t r_used = 0; | |
3482 if (r != NULL) { | |
3483 r_used = r->used(); | |
3484 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
3485 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 3486 // need to take the lock to guard against two threads calling |
3487 // get_gc_alloc_region concurrently (very unlikely but...) | |
3488 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3489 r->save_marks(); | |
3490 } | |
3491 } | |
3492 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
3493 _gc_alloc_regions[purpose] = r; | |
3494 if (old_alloc_region != NULL) { | |
3495 // Replace aliases too. | |
3496 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3497 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
3498 _gc_alloc_regions[ap] = r; | |
3499 } | |
3500 } | |
3501 } | |
3502 if (r != NULL) { | |
3503 push_gc_alloc_region(r); | |
3504 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
3505 // We are using a region as a GC alloc region after it has been used | |
3506 // as a mutator allocation region during the current marking cycle. | |
3507 // The mutator-allocated objects are currently implicitly marked, but | |
3508 // when we move hr->next_top_at_mark_start() forward at the end | |
3509 // of the GC pause, they won't be. We therefore mark all objects in | |
3510 // the "gap". We do this object-by-object, since marking densely | |
3511 // does not currently work right with marking bitmap iteration. This | |
3512 // means we rely on TLAB filling at the start of pauses, and no | |
3513 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
3514 // to fix the marking bitmap iteration. | |
3515 HeapWord* curhw = r->next_top_at_mark_start(); | |
3516 HeapWord* t = original_top; | |
3517 | |
3518 while (curhw < t) { | |
3519 oop cur = (oop)curhw; | |
3520 // We'll assume parallel for generality. This is rare code. | |
3521 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
3522 curhw = curhw + cur->size(); | |
3523 } | |
3524 assert(curhw == t, "Should have parsed correctly."); | |
3525 } | |
3526 if (G1PolicyVerbose > 1) { | |
3527 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
3528 "for survivors:", r->bottom(), original_top, r->end()); | |
3529 r->print(); | |
3530 } | |
3531 g1_policy()->record_before_bytes(r_used); | |
3532 } | |
3533 } | |
3534 | |
3535 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
3536 assert(Thread::current()->is_VM_thread() || | |
2152 | 3537 FreeList_lock->owned_by_self(), "Precondition"); |
342 | 3538 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), |
3539 "Precondition."); | |
3540 hr->set_is_gc_alloc_region(true); | |
3541 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
3542 _gc_alloc_region_list = hr; | |
3543 } | |
3544 | |
#ifdef G1_DEBUG
// Debugging closure: reports any heap region that is still tagged as a
// GC alloc region. Never aborts the iteration.
class FindGCAllocRegion: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_gc_alloc_region()) {
      gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
                             r->hrs_index(), r->bottom());
    }
    // false == keep iterating over the remaining regions.
    return false;
  }
};
#endif // G1_DEBUG
3557 | |
3558 void G1CollectedHeap::forget_alloc_region_list() { | |
2152 | 3559 assert_at_safepoint(true /* should_be_vm_thread */); |
342 | 3560 while (_gc_alloc_region_list != NULL) { |
3561 HeapRegion* r = _gc_alloc_region_list; | |
3562 assert(r->is_gc_alloc_region(), "Invariant."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3563 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3564 // newly allocated data in order to be able to apply deferred updates |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3565 // before the GC is done for verification purposes (i.e to allow |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3566 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3567 // collection. |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3568 r->ContiguousSpace::set_saved_mark(); |
342 | 3569 _gc_alloc_region_list = r->next_gc_alloc_region(); |
3570 r->set_next_gc_alloc_region(NULL); | |
3571 r->set_is_gc_alloc_region(false); | |
545 | 3572 if (r->is_survivor()) { |
3573 if (r->is_empty()) { | |
3574 r->set_not_young(); | |
3575 } else { | |
3576 _young_list->add_survivor_region(r); | |
3577 } | |
3578 } | |
342 | 3579 } |
3580 #ifdef G1_DEBUG | |
3581 FindGCAllocRegion fa; | |
3582 heap_region_iterate(&fa); | |
3583 #endif // G1_DEBUG | |
3584 } | |
3585 | |
3586 | |
3587 bool G1CollectedHeap::check_gc_alloc_regions() { | |
3588 // TODO: allocation regions check | |
3589 return true; | |
3590 } | |
3591 | |
3592 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 3593 // First, let's check that the GC alloc region list is empty (it should) |
3594 assert(_gc_alloc_region_list == NULL, "invariant"); | |
3595 | |
342 | 3596 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 3597 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3598 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
636 | 3599 |
342 | 3600 // Create new GC alloc regions. |
636 | 3601 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
3602 _retained_gc_alloc_regions[ap] = NULL; | |
3603 | |
3604 if (alloc_region != NULL) { | |
3605 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
3606 | |
3607 // let's make sure that the GC alloc region is not tagged as such | |
3608 // outside a GC operation | |
3609 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
3610 | |
3611 if (alloc_region->in_collection_set() || | |
3612 alloc_region->top() == alloc_region->end() || | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3613 alloc_region->top() == alloc_region->bottom() || |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3614 alloc_region->isHumongous()) { |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3615 // we will discard the current GC alloc region if |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3616 // * it's in the collection set (it can happen!), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3617 // * it's already full (no point in using it), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3618 // * it's empty (this means that it was emptied during |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3619 // a cleanup and it should be on the free list now), or |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3620 // * it's humongous (this means that it was emptied |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3621 // during a cleanup and was added to the free list, but |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3622 // has been subseqently used to allocate a humongous |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3623 // object that may be less than the region size). |
636 | 3624 |
3625 alloc_region = NULL; | |
3626 } | |
3627 } | |
3628 | |
3629 if (alloc_region == NULL) { | |
3630 // we will get a new GC alloc region | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3631 alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3632 } else { |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3633 // the region was retained from the last collection |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3634 ++_gc_alloc_region_counts[ap]; |
1388 | 3635 if (G1PrintHeapRegions) { |
3636 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
3637 "top "PTR_FORMAT, | |
3638 alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); | |
3639 } | |
342 | 3640 } |
636 | 3641 |
342 | 3642 if (alloc_region != NULL) { |
636 | 3643 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 3644 set_gc_alloc_region(ap, alloc_region); |
3645 } | |
636 | 3646 |
3647 assert(_gc_alloc_regions[ap] == NULL || | |
3648 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
3649 "the GC alloc region should be tagged as such"); | |
3650 assert(_gc_alloc_regions[ap] == NULL || | |
3651 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
3652 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 3653 } |
3654 // Set alternative regions for allocation purposes that have reached | |
636 | 3655 // their limit. |
342 | 3656 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
3657 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
3658 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
3659 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
3660 } | |
3661 } | |
3662 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3663 } | |
3664 | |
636 | 3665 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 3666 // We keep a separate list of all regions that have been alloc regions in |
636 | 3667 // the current collection pause. Forget that now. This method will |
3668 // untag the GC alloc regions and tear down the GC alloc region | |
3669 // list. It's desirable that no regions are tagged as GC alloc | |
3670 // outside GCs. | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3671 |
342 | 3672 forget_alloc_region_list(); |
3673 | |
3674 // The current alloc regions contain objs that have survived | |
3675 // collection. Make them no longer GC alloc regions. | |
3676 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3677 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 3678 _retained_gc_alloc_regions[ap] = NULL; |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3679 _gc_alloc_region_counts[ap] = 0; |
636 | 3680 |
3681 if (r != NULL) { | |
3682 // we retain nothing on _gc_alloc_regions between GCs | |
3683 set_gc_alloc_region(ap, NULL); | |
3684 | |
3685 if (r->is_empty()) { | |
2152 | 3686 // We didn't actually allocate anything in it; let's just put |
3687 // it back on the free list. | |
2432
455328d90876
7029458: G1: Add newly-reclaimed regions to the beginning of the region free list, not the end
tonyp
parents:
2369
diff
changeset
|
3688 _free_list.add_as_head(r); |
636 | 3689 } else if (_retain_gc_alloc_region[ap] && !totally) { |
3690 // retain it so that we can use it at the beginning of the next GC | |
3691 _retained_gc_alloc_regions[ap] = r; | |
342 | 3692 } |
3693 } | |
636 | 3694 } |
3695 } | |
3696 | |
3697 #ifndef PRODUCT | |
3698 // Useful for debugging | |
3699 | |
3700 void G1CollectedHeap::print_gc_alloc_regions() { | |
3701 gclog_or_tty->print_cr("GC alloc regions"); | |
3702 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3703 HeapRegion* r = _gc_alloc_regions[ap]; | |
3704 if (r == NULL) { | |
3705 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3706 } else { | |
3707 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3708 ap, r->bottom(), r->used()); | |
3709 } | |
3710 } | |
3711 } | |
3712 #endif // PRODUCT | |
342 | 3713 |
3714 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3715 _drain_in_progress = false; | |
3716 set_evac_failure_closure(cl); | |
3717 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3718 } | |
3719 | |
3720 void G1CollectedHeap::finalize_for_evac_failure() { | |
3721 assert(_evac_failure_scan_stack != NULL && | |
3722 _evac_failure_scan_stack->length() == 0, | |
3723 "Postcondition"); | |
3724 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3725 delete _evac_failure_scan_stack; |
342 | 3726 _evac_failure_scan_stack = NULL; |
3727 } | |
3728 | |
3729 | |
3730 | |
3731 // *** Sequential G1 Evacuation | |
3732 | |
3733 class G1IsAliveClosure: public BoolObjectClosure { | |
3734 G1CollectedHeap* _g1; | |
3735 public: | |
3736 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3737 void do_object(oop p) { assert(false, "Do not call."); } | |
3738 bool do_object_b(oop p) { | |
3739 // It is reachable if it is outside the collection set, or is inside | |
3740 // and forwarded. | |
3741 | |
3742 #ifdef G1_DEBUG | |
3743 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3744 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3745 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3746 #endif // G1_DEBUG | |
3747 | |
3748 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3749 } | |
3750 }; | |
3751 | |
3752 class G1KeepAliveClosure: public OopClosure { | |
3753 G1CollectedHeap* _g1; | |
3754 public: | |
3755 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3756 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3757 void do_oop( oop* p) { |
342 | 3758 oop obj = *p; |
3759 #ifdef G1_DEBUG | |
3760 if (PrintGC && Verbose) { | |
3761 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3762 p, (void*) obj, (void*) *p); | |
3763 } | |
3764 #endif // G1_DEBUG | |
3765 | |
3766 if (_g1->obj_in_cs(obj)) { | |
3767 assert( obj->is_forwarded(), "invariant" ); | |
3768 *p = obj->forwardee(); | |
3769 #ifdef G1_DEBUG | |
3770 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3771 (void*) obj, (void*) *p); | |
3772 #endif // G1_DEBUG | |
3773 } | |
3774 } | |
3775 }; | |
3776 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3777 class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3778 private: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3779 G1CollectedHeap* _g1; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3780 DirtyCardQueue *_dcq; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3781 CardTableModRefBS* _ct_bs; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3782 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3783 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3784 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3785 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3786 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3787 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3788 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3789 template <class T> void do_oop_work(T* p) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3790 assert(_from->is_in_reserved(p), "paranoia"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3791 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3792 !_from->is_survivor()) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3793 size_t card_index = _ct_bs->index_for(p); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3794 if (_ct_bs->mark_card_deferred(card_index)) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3795 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3796 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3797 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3798 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3799 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3800 |
342 | 3801 class RemoveSelfPointerClosure: public ObjectClosure { |
3802 private: | |
3803 G1CollectedHeap* _g1; | |
3804 ConcurrentMark* _cm; | |
3805 HeapRegion* _hr; | |
3806 size_t _prev_marked_bytes; | |
3807 size_t _next_marked_bytes; | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3808 OopsInHeapRegionClosure *_cl; |
342 | 3809 public: |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3810 RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr, |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3811 OopsInHeapRegionClosure* cl) : |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3812 _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3813 _next_marked_bytes(0), _cl(cl) {} |
342 | 3814 |
3815 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3816 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3817 | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3818 // <original comment> |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3819 // The original idea here was to coalesce evacuated and dead objects. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3820 // However that caused complications with the block offset table (BOT). |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3821 // In particular if there were two TLABs, one of them partially refined. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3822 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3823 // The BOT entries of the unrefined part of TLAB_2 point to the start |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3824 // of TLAB_2. If the last object of the TLAB_1 and the first object |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3825 // of TLAB_2 are coalesced, then the cards of the unrefined part |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3826 // would point into middle of the filler object. |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3827 // The current approach is to not coalesce and leave the BOT contents intact. |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3828 // </original comment> |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3829 // |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3830 // We now reset the BOT when we start the object iteration over the |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3831 // region and refine its entries for every object we come across. So |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3832 // the above comment is not really relevant and we should be able |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3833 // to coalesce dead objects if we want to. |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3834 void do_object(oop obj) { |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3835 HeapWord* obj_addr = (HeapWord*) obj; |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3836 assert(_hr->is_in(obj_addr), "sanity"); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3837 size_t obj_size = obj->size(); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3838 _hr->update_bot_for_object(obj_addr, obj_size); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3839 if (obj->is_forwarded() && obj->forwardee() == obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3840 // The object failed to move. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3841 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3842 _cm->markPrev(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3843 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3844 _prev_marked_bytes += (obj_size * HeapWordSize); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3845 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3846 _cm->markAndGrayObjectIfNecessary(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3847 } |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3848 obj->set_mark(markOopDesc::prototype()); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3849 // While we were processing RSet buffers during the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3850 // collection, we actually didn't scan any cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3851 // collection set, since we didn't want to update remebered |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3852 // sets with entries that point into the collection set, given |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3853 // that live objects fromthe collection set are about to move |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3854 // and such entries will be stale very soon. This change also |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3855 // dealt with a reliability issue which involved scanning a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3856 // card in the collection set and coming across an array that |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3857 // was being chunked and looking malformed. The problem is |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3858 // that, if evacuation fails, we might have remembered set |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3859 // entries missing given that we skipped cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3860 // collection set. So, we'll recreate such entries now. |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3861 obj->oop_iterate(_cl); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3862 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3863 } else { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3864 // The object has been either evacuated or is dead. Fill it with a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3865 // dummy object. |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3866 MemRegion mr((HeapWord*)obj, obj_size); |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3867 CollectedHeap::fill_with_object(mr); |
342 | 3868 _cm->clearRangeBothMaps(mr); |
3869 } | |
3870 } | |
3871 }; | |
3872 | |
3873 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
1705 | 3874 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3875 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3876 UpdateRSetDeferred deferred_update(_g1h, &dcq); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3877 OopsInHeapRegionClosure *cl; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3878 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3879 cl = &deferred_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3880 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3881 cl = &immediate_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3882 } |
342 | 3883 HeapRegion* cur = g1_policy()->collection_set(); |
3884 while (cur != NULL) { | |
3885 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3886 assert(!cur->isHumongous(), "sanity"); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3887 |
342 | 3888 if (cur->evacuation_failed()) { |
3889 assert(cur->in_collection_set(), "bad CS"); | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3890 RemoveSelfPointerClosure rspc(_g1h, cur, cl); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3891 |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3892 cur->reset_bot(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3893 cl->set_region(cur); |
342 | 3894 cur->object_iterate(&rspc); |
3895 | |
3896 // A number of manipulations to make the TAMS be the current top, | |
3897 // and the marked bytes be the ones observed in the iteration. | |
3898 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
3899 // The comments below are the postconditions achieved by the | |
3900 // calls. Note especially the last such condition, which says that | |
3901 // the count of marked bytes has been properly restored. | |
3902 cur->note_start_of_marking(false); | |
3903 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3904 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
3905 // _next_marked_bytes == prev_marked_bytes. | |
3906 cur->note_end_of_marking(); | |
3907 // _prev_top_at_mark_start == top(), | |
3908 // _prev_marked_bytes == prev_marked_bytes | |
3909 } | |
3910 // If there is no mark in progress, we modified the _next variables | |
3911 // above needlessly, but harmlessly. | |
3912 if (_g1h->mark_in_progress()) { | |
3913 cur->note_start_of_marking(false); | |
3914 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3915 // _next_marked_bytes == next_marked_bytes. | |
3916 } | |
3917 | |
3918 // Now make sure the region has the right index in the sorted array. | |
3919 g1_policy()->note_change_in_marked_bytes(cur); | |
3920 } | |
3921 cur = cur->next_in_collection_set(); | |
3922 } | |
3923 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3924 | |
3925 // Now restore saved marks, if any. | |
3926 if (_objs_with_preserved_marks != NULL) { | |
3927 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3928 guarantee(_objs_with_preserved_marks->length() == | |
3929 _preserved_marks_of_objs->length(), "Both or none."); | |
3930 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3931 oop obj = _objs_with_preserved_marks->at(i); | |
3932 markOop m = _preserved_marks_of_objs->at(i); | |
3933 obj->set_mark(m); | |
3934 } | |
3935 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3936 delete _objs_with_preserved_marks; | |
3937 delete _preserved_marks_of_objs; | |
3938 _objs_with_preserved_marks = NULL; | |
3939 _preserved_marks_of_objs = NULL; | |
3940 } | |
3941 } | |
3942 | |
3943 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
3944 _evac_failure_scan_stack->push(obj); | |
3945 } | |
3946 | |
3947 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3948 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3949 | |
3950 while (_evac_failure_scan_stack->length() > 0) { | |
3951 oop obj = _evac_failure_scan_stack->pop(); | |
3952 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3953 obj->oop_iterate_backwards(_evac_failure_closure); | |
3954 } | |
3955 } | |
3956 | |
3957 oop | |
3958 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
3959 oop old) { | |
3960 markOop m = old->mark(); | |
3961 oop forward_ptr = old->forward_to_atomic(old); | |
3962 if (forward_ptr == NULL) { | |
3963 // Forward-to-self succeeded. | |
3964 if (_evac_failure_closure != cl) { | |
3965 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
3966 assert(!_drain_in_progress, | |
3967 "Should only be true while someone holds the lock."); | |
3968 // Set the global evac-failure closure to the current thread's. | |
3969 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
3970 set_evac_failure_closure(cl); | |
3971 // Now do the common part. | |
3972 handle_evacuation_failure_common(old, m); | |
3973 // Reset to NULL. | |
3974 set_evac_failure_closure(NULL); | |
3975 } else { | |
3976 // The lock is already held, and this is recursive. | |
3977 assert(_drain_in_progress, "This should only be the recursive case."); | |
3978 handle_evacuation_failure_common(old, m); | |
3979 } | |
3980 return old; | |
3981 } else { | |
3982 // Someone else had a place to copy it. | |
3983 return forward_ptr; | |
3984 } | |
3985 } | |
3986 | |
3987 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
3988 set_evacuation_failed(true); | |
3989 | |
3990 preserve_mark_if_necessary(old, m); | |
3991 | |
3992 HeapRegion* r = heap_region_containing(old); | |
3993 if (!r->evacuation_failed()) { | |
3994 r->set_evacuation_failed(true); | |
1282 | 3995 if (G1PrintHeapRegions) { |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
3996 gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " |
342 | 3997 "["PTR_FORMAT","PTR_FORMAT")\n", |
3998 r, r->bottom(), r->end()); | |
3999 } | |
4000 } | |
4001 | |
4002 push_on_evac_failure_scan_stack(old); | |
4003 | |
4004 if (!_drain_in_progress) { | |
4005 // prevent recursion in copy_to_survivor_space() | |
4006 _drain_in_progress = true; | |
4007 drain_evac_failure_scan_stack(); | |
4008 _drain_in_progress = false; | |
4009 } | |
4010 } | |
4011 | |
4012 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
2038
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4013 assert(evacuation_failed(), "Oversaving!"); |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4014 // We want to call the "for_promotion_failure" version only in the |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4015 // case of a promotion failure. |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4016 if (m->must_be_preserved_for_promotion_failure(obj)) { |
342 | 4017 if (_objs_with_preserved_marks == NULL) { |
4018 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
4019 _objs_with_preserved_marks = | |
4020 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
4021 _preserved_marks_of_objs = | |
4022 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
4023 } | |
4024 _objs_with_preserved_marks->push(obj); | |
4025 _preserved_marks_of_objs->push(m); | |
4026 } | |
4027 } | |
4028 | |
4029 // *** Parallel G1 Evacuation | |
4030 | |
4031 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
4032 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4033 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4034 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4035 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4036 |
342 | 4037 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
4038 // let the caller handle alloc failure | |
4039 if (alloc_region == NULL) return NULL; | |
4040 | |
4041 HeapWord* block = alloc_region->par_allocate(word_size); | |
4042 if (block == NULL) { | |
4043 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
4044 } | |
4045 return block; | |
4046 } | |
4047 | |
545 | 4048 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
4049 bool par) { | |
4050 // Another thread might have obtained alloc_region for the given | |
4051 // purpose, and might be attempting to allocate in it, and might | |
4052 // succeed. Therefore, we can't do the "finalization" stuff on the | |
4053 // region below until we're sure the last allocation has happened. | |
4054 // We ensure this by allocating the remaining space with a garbage | |
4055 // object. | |
4056 if (par) par_allocate_remaining_space(alloc_region); | |
4057 // Now we can do the post-GC stuff on the region. | |
4058 alloc_region->note_end_of_copying(); | |
4059 g1_policy()->record_after_bytes(alloc_region->used()); | |
4060 } | |
4061 | |
342 | 4062 HeapWord* |
4063 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
4064 HeapRegion* alloc_region, | |
4065 bool par, | |
4066 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4067 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4068 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4069 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4070 |
2152 | 4071 // We need to make sure we serialize calls to this method. Given |
4072 // that the FreeList_lock guards accesses to the free_list anyway, | |
4073 // and we need to potentially remove a region from it, we'll use it | |
4074 // to protect the whole call. | |
4075 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | |
4076 | |
342 | 4077 HeapWord* block = NULL; |
4078 // In the parallel case, a previous thread to obtain the lock may have | |
4079 // already assigned a new gc_alloc_region. | |
4080 if (alloc_region != _gc_alloc_regions[purpose]) { | |
4081 assert(par, "But should only happen in parallel case."); | |
4082 alloc_region = _gc_alloc_regions[purpose]; | |
4083 if (alloc_region == NULL) return NULL; | |
4084 block = alloc_region->par_allocate(word_size); | |
4085 if (block != NULL) return block; | |
4086 // Otherwise, continue; this new region is empty, too. | |
4087 } | |
4088 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 4089 retire_alloc_region(alloc_region, par); |
342 | 4090 |
4091 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
4092 // Cannot allocate more regions for the given purpose. | |
4093 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
4094 // Is there an alternative? | |
4095 if (purpose != alt_purpose) { | |
4096 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
4097 // Has not the alternative region been aliased? | |
545 | 4098 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 4099 // Try to allocate in the alternative region. |
4100 if (par) { | |
4101 block = alt_region->par_allocate(word_size); | |
4102 } else { | |
4103 block = alt_region->allocate(word_size); | |
4104 } | |
4105 // Make an alias. | |
4106 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 4107 if (block != NULL) { |
4108 return block; | |
4109 } | |
4110 retire_alloc_region(alt_region, par); | |
342 | 4111 } |
4112 // Both the allocation region and the alternative one are full | |
4113 // and aliased, replace them with a new allocation region. | |
4114 purpose = alt_purpose; | |
4115 } else { | |
4116 set_gc_alloc_region(purpose, NULL); | |
4117 return NULL; | |
4118 } | |
4119 } | |
4120 | |
4121 // Now allocate a new region for allocation. | |
2152 | 4122 alloc_region = new_gc_alloc_region(purpose, word_size); |
342 | 4123 |
4124 // let the caller handle alloc failure | |
4125 if (alloc_region != NULL) { | |
4126 | |
4127 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
4128 assert(alloc_region->saved_mark_at_top(), | |
4129 "Mark should have been saved already."); | |
4130 // This must be done last: once it's installed, other regions may | |
4131 // allocate in it (without holding the lock.) | |
4132 set_gc_alloc_region(purpose, alloc_region); | |
4133 | |
4134 if (par) { | |
4135 block = alloc_region->par_allocate(word_size); | |
4136 } else { | |
4137 block = alloc_region->allocate(word_size); | |
4138 } | |
4139 // Caller handles alloc failure. | |
4140 } else { | |
4141 // This sets other apis using the same old alloc region to NULL, also. | |
4142 set_gc_alloc_region(purpose, NULL); | |
4143 } | |
4144 return block; // May be NULL. | |
4145 } | |
4146 | |
4147 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
4148 HeapWord* block = NULL; | |
4149 size_t free_words; | |
4150 do { | |
4151 free_words = r->free()/HeapWordSize; | |
4152 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
4153 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 4154 // Otherwise, try to claim it. |
4155 block = r->par_allocate(free_words); | |
4156 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
4157 fill_with_object(block, free_words); |
342 | 4158 } |
4159 | |
4160 #ifndef PRODUCT | |
4161 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
4162 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
4163 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
4164 return true; | |
4165 } | |
4166 #endif // PRODUCT | |
4167 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4168 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4169 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4170 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4171 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4172 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4173 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4174 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4175 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4176 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4177 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4178 _age_table(false), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4179 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4180 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4181 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4182 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4183 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4184 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4185 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4186 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4187 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4188 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4189 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4190 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4191 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4192 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4193 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4194 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4195 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4196 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4197 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4198 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4199 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4200 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4201 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4202 } |
342 | 4203 |
1709 | 4204 void |
4205 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) | |
4206 { | |
4207 st->print_raw_cr("GC Termination Stats"); | |
4208 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" | |
4209 " ------waste (KiB)------"); | |
4210 st->print_raw_cr("thr ms ms % ms % attempts" | |
4211 " total alloc undo"); | |
4212 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" | |
4213 " ------- ------- -------"); | |
4214 } | |
4215 | |
4216 void | |
4217 G1ParScanThreadState::print_termination_stats(int i, | |
4218 outputStream* const st) const | |
4219 { | |
4220 const double elapsed_ms = elapsed_time() * 1000.0; | |
4221 const double s_roots_ms = strong_roots_time() * 1000.0; | |
4222 const double term_ms = term_time() * 1000.0; | |
4223 st->print_cr("%3d %9.2f %9.2f %6.2f " | |
4224 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " | |
4225 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), | |
4226 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, | |
4227 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), | |
4228 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, | |
4229 alloc_buffer_waste() * HeapWordSize / K, | |
4230 undo_waste() * HeapWordSize / K); | |
4231 } | |
4232 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4233 #ifdef ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4234 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4235 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4236 assert(UseCompressedOops, "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4237 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4238 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4239 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4240 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4241 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4242 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4243 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4244 bool G1ParScanThreadState::verify_ref(oop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4245 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4246 if (has_partial_array_mask(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4247 // Must be in the collection set--it's already been copied. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4248 oop p = clear_partial_array_mask(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4249 assert(_g1h->obj_in_cs(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4250 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4251 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4252 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4253 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4254 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4255 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4256 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4257 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4258 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4259 bool G1ParScanThreadState::verify_task(StarTask ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4260 if (ref.is_narrow()) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4261 return verify_ref((narrowOop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4262 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4263 return verify_ref((oop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4264 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4265 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4266 #endif // ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4267 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4268 void G1ParScanThreadState::trim_queue() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4269 StarTask ref; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4270 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4271 // Drain the overflow stack first, so other threads can steal. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4272 while (refs()->pop_overflow(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4273 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4274 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4275 while (refs()->pop_local(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4276 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4277 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4278 } while (!refs()->is_empty()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4279 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4280 |
342 | 4281 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
4282 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
4283 _par_scan_state(par_scan_state) { } | |
4284 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4285 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 4286 // This is called _after_ do_oop_work has been called, hence after |
4287 // the object has been relocated to its new location and *p points | |
4288 // to its new location. | |
4289 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4290 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4291 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4292 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4293 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), |
342 | 4294 "shouldn't still be in the CSet if evacuation didn't fail."); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4295 HeapWord* addr = (HeapWord*)obj; |
342 | 4296 if (_g1->is_in_g1_reserved(addr)) |
4297 _cm->grayRoot(oop(addr)); | |
4298 } | |
4299 } | |
4300 | |
4301 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
4302 size_t word_sz = old->size(); | |
4303 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
4304 // +1 to make the -1 indexes valid... | |
4305 int young_index = from_region->young_index_in_cset()+1; | |
4306 assert( (from_region->is_young() && young_index > 0) || | |
4307 (!from_region->is_young() && young_index == 0), "invariant" ); | |
4308 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
4309 markOop m = old->mark(); | |
545 | 4310 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
4311 : m->age(); | |
4312 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 4313 word_sz); |
4314 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
4315 oop obj = oop(obj_ptr); | |
4316 | |
4317 if (obj_ptr == NULL) { | |
4318 // This will either forward-to-self, or detect that someone else has | |
4319 // installed a forwarding pointer. | |
4320 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4321 return _g1->handle_evacuation_failure_par(cl, old); | |
4322 } | |
4323 | |
526 | 4324 // We're going to allocate linearly, so might as well prefetch ahead. |
4325 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
4326 | |
342 | 4327 oop forward_ptr = old->forward_to_atomic(obj); |
4328 if (forward_ptr == NULL) { | |
4329 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 4330 if (g1p->track_object_age(alloc_purpose)) { |
4331 // We could simply do obj->incr_age(). However, this causes a | |
4332 // performance issue. obj->incr_age() will first check whether | |
4333 // the object has a displaced mark by checking its mark word; | |
4334 // getting the mark word from the new location of the object | |
4335 // stalls. So, given that we already have the mark word and we | |
4336 // are about to install it anyway, it's better to increase the | |
4337 // age on the mark word, when the object does not have a | |
4338 // displaced mark word. We're not expecting many objects to have | |
4339 // a displaced marked word, so that case is not optimized | |
4340 // further (it could be...) and we simply call obj->incr_age(). | |
4341 | |
4342 if (m->has_displaced_mark_helper()) { | |
4343 // in this case, we have to install the mark word first, | |
4344 // otherwise obj looks to be forwarded (the old mark word, | |
4345 // which contains the forward pointer, was copied) | |
4346 obj->set_mark(m); | |
4347 obj->incr_age(); | |
4348 } else { | |
4349 m = m->incr_age(); | |
545 | 4350 obj->set_mark(m); |
526 | 4351 } |
545 | 4352 _par_scan_state->age_table()->add(obj, word_sz); |
4353 } else { | |
4354 obj->set_mark(m); | |
526 | 4355 } |
4356 | |
342 | 4357 // preserve "next" mark bit |
4358 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
4359 if (!use_local_bitmaps || | |
4360 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
4361 // if we couldn't mark it on the local bitmap (this happens when | |
4362 // the object was not allocated in the GCLab), we have to bite | |
4363 // the bullet and do the standard parallel mark | |
4364 _cm->markAndGrayObjectIfNecessary(obj); | |
4365 } | |
4366 #if 1 | |
4367 if (_g1->isMarkedNext(old)) { | |
4368 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
4369 } | |
4370 #endif | |
4371 } | |
4372 | |
4373 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
4374 surv_young_words[young_index] += word_sz; | |
4375 | |
4376 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
4377 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4378 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4379 _par_scan_state->push_on_queue(old_p); |
342 | 4380 } else { |
526 | 4381 // No point in using the slower heap_region_containing() method, |
4382 // given that we know obj is in the heap. | |
4383 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 4384 obj->oop_iterate_backwards(_scanner); |
4385 } | |
4386 } else { | |
4387 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
4388 obj = forward_ptr; | |
4389 } | |
4390 return obj; | |
4391 } | |
4392 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4393 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4394 template <class T> |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4395 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4396 ::do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4397 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 4398 assert(barrier != G1BarrierRS || obj != NULL, |
4399 "Precondition: G1BarrierRS implies obj is nonNull"); | |
4400 | |
526 | 4401 // here the null check is implicit in the cset_fast_test() test |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4402 if (_g1->in_cset_fast_test(obj)) { |
342 | 4403 #if G1_REM_SET_LOGGING |
526 | 4404 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
4405 "into CS.", p, (void*) obj); | |
342 | 4406 #endif |
526 | 4407 if (obj->is_forwarded()) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4408 oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
526 | 4409 } else { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4410 oop copy_oop = copy_to_survivor_space(obj); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4411 oopDesc::encode_store_heap_oop(p, copy_oop); |
342 | 4412 } |
526 | 4413 // When scanning the RS, we only care about objs in CS. |
4414 if (barrier == G1BarrierRS) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4415 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 4416 } |
526 | 4417 } |
4418 | |
4419 if (barrier == G1BarrierEvac && obj != NULL) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4420 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 4421 } |
4422 | |
4423 if (do_gen_barrier && obj != NULL) { | |
4424 par_do_barrier(p); | |
4425 } | |
4426 } | |
4427 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4428 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4429 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4430 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4431 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
526 | 4432 assert(has_partial_array_mask(p), "invariant"); |
4433 oop old = clear_partial_array_mask(p); | |
342 | 4434 assert(old->is_objArray(), "must be obj array"); |
4435 assert(old->is_forwarded(), "must be forwarded"); | |
4436 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
4437 | |
4438 objArrayOop obj = objArrayOop(old->forwardee()); | |
4439 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
4440 // Process ParGCArrayScanChunk elements now | |
4441 // and push the remainder back onto queue | |
4442 int start = arrayOop(old)->length(); | |
4443 int end = obj->length(); | |
4444 int remainder = end - start; | |
4445 assert(start <= end, "just checking"); | |
4446 if (remainder > 2 * ParGCArrayScanChunk) { | |
4447 // Test above combines last partial chunk with a full chunk | |
4448 end = start + ParGCArrayScanChunk; | |
4449 arrayOop(old)->set_length(end); | |
4450 // Push remainder. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4451 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4452 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4453 _par_scan_state->push_on_queue(old_p); |
342 | 4454 } else { |
4455 // Restore length so that the heap remains parsable in | |
4456 // case of evacuation failure. | |
4457 arrayOop(old)->set_length(end); | |
4458 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4459 _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
342 | 4460 // process our set of indices (include header in first chunk) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4461 obj->oop_iterate_range(&_scanner, start, end); |
342 | 4462 } |
4463 | |
4464 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
4465 protected: | |
4466 G1CollectedHeap* _g1h; | |
4467 G1ParScanThreadState* _par_scan_state; | |
4468 RefToScanQueueSet* _queues; | |
4469 ParallelTaskTerminator* _terminator; | |
4470 | |
4471 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
4472 RefToScanQueueSet* queues() { return _queues; } | |
4473 ParallelTaskTerminator* terminator() { return _terminator; } | |
4474 | |
4475 public: | |
4476 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
4477 G1ParScanThreadState* par_scan_state, | |
4478 RefToScanQueueSet* queues, | |
4479 ParallelTaskTerminator* terminator) | |
4480 : _g1h(g1h), _par_scan_state(par_scan_state), | |
4481 _queues(queues), _terminator(terminator) {} | |
4482 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4483 void do_void(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4484 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4485 private: |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4486 inline bool offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4487 }; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4488 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4489 bool G1ParEvacuateFollowersClosure::offer_termination() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4490 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4491 pss->start_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4492 const bool res = terminator()->offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4493 pss->end_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4494 return res; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4495 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4496 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4497 void G1ParEvacuateFollowersClosure::do_void() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4498 StarTask stolen_task; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4499 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4500 pss->trim_queue(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4501 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4502 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4503 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4504 assert(pss->verify_task(stolen_task), "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4505 if (stolen_task.is_narrow()) { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4506 pss->deal_with_reference((narrowOop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4507 } else { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4508 pss->deal_with_reference((oop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4509 } |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4510 |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4511 // We've just processed a reference and we might have made |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4512 // available new entries on the queues. So we have to make sure |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4513 // we drain the queues as necessary. |
342 | 4514 pss->trim_queue(); |
4515 } | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4516 } while (!offer_termination()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4517 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4518 pss->retire_alloc_buffers(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4519 } |
342 | 4520 |
4521 class G1ParTask : public AbstractGangTask { | |
4522 protected: | |
4523 G1CollectedHeap* _g1h; | |
4524 RefToScanQueueSet *_queues; | |
4525 ParallelTaskTerminator _terminator; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4526 int _n_workers; |
342 | 4527 |
4528 Mutex _stats_lock; | |
4529 Mutex* stats_lock() { return &_stats_lock; } | |
4530 | |
4531 size_t getNCards() { | |
4532 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
4533 / G1BlockOffsetSharedArray::N_bytes; | |
4534 } | |
4535 | |
4536 public: | |
4537 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
4538 : AbstractGangTask("G1 collection"), | |
4539 _g1h(g1h), | |
4540 _queues(task_queues), | |
4541 _terminator(workers, _queues), | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4542 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4543 _n_workers(workers) |
342 | 4544 {} |
4545 | |
4546 RefToScanQueueSet* queues() { return _queues; } | |
4547 | |
4548 RefToScanQueue *work_queue(int i) { | |
4549 return queues()->queue(i); | |
4550 } | |
4551 | |
4552 void work(int i) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4553 if (i >= _n_workers) return; // no work needed this round |
1611 | 4554 |
4555 double start_time_ms = os::elapsedTime() * 1000.0; | |
4556 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); | |
4557 | |
342 | 4558 ResourceMark rm; |
4559 HandleMark hm; | |
4560 | |
526 | 4561 G1ParScanThreadState pss(_g1h, i); |
4562 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
4563 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
4564 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 4565 |
4566 pss.set_evac_closure(&scan_evac_cl); | |
4567 pss.set_evac_failure_closure(&evac_failure_cl); | |
4568 pss.set_partial_scan_closure(&partial_scan_cl); | |
4569 | |
4570 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
4571 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
4572 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4573 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4574 |
342 | 4575 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
4576 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
4577 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
4578 | |
4579 OopsInHeapRegionClosure *scan_root_cl; | |
4580 OopsInHeapRegionClosure *scan_perm_cl; | |
4581 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
4582 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
342 | 4583 scan_root_cl = &scan_mark_root_cl; |
4584 scan_perm_cl = &scan_mark_perm_cl; | |
4585 } else { | |
4586 scan_root_cl = &only_scan_root_cl; | |
4587 scan_perm_cl = &only_scan_perm_cl; | |
4588 } | |
4589 | |
4590 pss.start_strong_roots(); | |
4591 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4592 SharedHeap::SO_AllClasses, | |
4593 scan_root_cl, | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4594 &push_heap_rs_cl, |
342 | 4595 scan_perm_cl, |
4596 i); | |
4597 pss.end_strong_roots(); | |
4598 { | |
4599 double start = os::elapsedTime(); | |
4600 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4601 evac.do_void(); | |
4602 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4603 double term_ms = pss.term_time()*1000.0; | |
4604 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
1611 | 4605 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); |
342 | 4606 } |
1282 | 4607 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
342 | 4608 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
4609 | |
4610 // Clean up any par-expanded rem sets. | |
4611 HeapRegionRemSet::par_cleanup(); | |
4612 | |
4613 if (ParallelGCVerbose) { | |
1709 | 4614 MutexLocker x(stats_lock()); |
4615 pss.print_termination_stats(i); | |
342 | 4616 } |
4617 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4618 assert(pss.refs()->is_empty(), "should be empty"); |
1611 | 4619 double end_time_ms = os::elapsedTime() * 1000.0; |
4620 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); | |
342 | 4621 } |
4622 }; | |
4623 | |
4624 // *** Common G1 Evacuation Stuff | |
4625 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4626 // This method is run in a GC worker. |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4627 |
342 | 4628 void |
4629 G1CollectedHeap:: | |
4630 g1_process_strong_roots(bool collecting_perm_gen, | |
4631 SharedHeap::ScanningOption so, | |
4632 OopClosure* scan_non_heap_roots, | |
4633 OopsInHeapRegionClosure* scan_rs, | |
4634 OopsInGenClosure* scan_perm, | |
4635 int worker_i) { | |
4636 // First scan the strong roots, including the perm gen. | |
4637 double ext_roots_start = os::elapsedTime(); | |
4638 double closure_app_time_sec = 0.0; | |
4639 | |
4640 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4641 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4642 buf_scan_perm.set_generation(perm_gen()); | |
4643 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4644 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4645 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4646 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4647 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4648 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4649 collecting_perm_gen, so, |
342 | 4650 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4651 &eager_scan_code_roots, |
342 | 4652 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4653 |
342 | 4654 // Finish up any enqueued closure apps. |
4655 buf_scan_non_heap_roots.done(); | |
4656 buf_scan_perm.done(); | |
4657 double ext_roots_end = os::elapsedTime(); | |
4658 g1_policy()->reset_obj_copy_time(worker_i); | |
4659 double obj_copy_time_sec = | |
4660 buf_scan_non_heap_roots.closure_app_seconds() + | |
4661 buf_scan_perm.closure_app_seconds(); | |
4662 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4663 double ext_root_time_ms = | |
4664 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4665 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4666 | |
4667 // Scan strong roots in mark stack. | |
4668 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4669 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4670 } | |
4671 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4672 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4673 | |
4674 // XXX What should this be doing in the parallel case? | |
4675 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4676 // Now scan the complement of the collection set. | |
4677 if (scan_rs != NULL) { | |
4678 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4679 } | |
4680 // Finish with the ref_processor roots. | |
4681 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4682 // We need to treat the discovered reference lists as roots and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4683 // keep entries (which are added by the marking threads) on them |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4684 // live until they can be processed at the end of marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4685 ref_processor()->weak_oops_do(scan_non_heap_roots); |
342 | 4686 ref_processor()->oops_do(scan_non_heap_roots); |
4687 } | |
4688 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4689 _process_strong_tasks->all_tasks_completed(); | |
4690 } | |
4691 | |
4692 void | |
4693 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4694 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4695 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4696 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4697 } |
4698 | |
4699 | |
4700 class SaveMarksClosure: public HeapRegionClosure { | |
4701 public: | |
4702 bool doHeapRegion(HeapRegion* r) { | |
4703 r->save_marks(); | |
4704 return false; | |
4705 } | |
4706 }; | |
4707 | |
4708 void G1CollectedHeap::save_marks() { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4709 if (!CollectedHeap::use_parallel_gc_threads()) { |
342 | 4710 SaveMarksClosure sm; |
4711 heap_region_iterate(&sm); | |
4712 } | |
4713 // We do this even in the parallel case | |
4714 perm_gen()->save_marks(); | |
4715 } | |
4716 | |
4717 void G1CollectedHeap::evacuate_collection_set() { | |
4718 set_evacuation_failed(false); | |
4719 | |
4720 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4721 concurrent_g1_refine()->set_use_cache(false); | |
889 | 4722 concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
4723 | |
342 | 4724 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
4725 set_par_threads(n_workers); | |
4726 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4727 | |
4728 init_for_evac_failure(NULL); | |
4729 | |
4730 rem_set()->prepare_for_younger_refs_iterate(true); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4731 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4732 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
342 | 4733 double start_par = os::elapsedTime(); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4734 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 4735 // The individual threads will set their evac-failure closures. |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4736 StrongRootsScope srs(this); |
1709 | 4737 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); |
342 | 4738 workers()->run_task(&g1_par_task); |
4739 } else { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4740 StrongRootsScope srs(this); |
342 | 4741 g1_par_task.work(0); |
4742 } | |
4743 | |
4744 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4745 g1_policy()->record_par_time(par_time); | |
4746 set_par_threads(0); | |
4747 // Is this the right thing to do here? We don't save marks | |
4748 // on individual heap regions when we allocate from | |
4749 // them in parallel, so this seems like the correct place for this. | |
545 | 4750 retire_all_alloc_regions(); |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4751 |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4752 // Weak root processing. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4753 // Note: when JSR 292 is enabled and code blobs can contain |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4754 // non-perm oops then we will need to process the code blobs |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4755 // here too. |
342 | 4756 { |
4757 G1IsAliveClosure is_alive(this); | |
4758 G1KeepAliveClosure keep_alive(this); | |
4759 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4760 } | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4761 release_gc_alloc_regions(false /* totally */); |
342 | 4762 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4763 |
889 | 4764 concurrent_g1_refine()->clear_hot_cache(); |
342 | 4765 concurrent_g1_refine()->set_use_cache(true); |
4766 | |
4767 finalize_for_evac_failure(); | |
4768 | |
4769 // Must do this before removing self-forwarding pointers, which clears | |
4770 // the per-region evac-failure flags. | |
4771 concurrent_mark()->complete_marking_in_collection_set(); | |
4772 | |
4773 if (evacuation_failed()) { | |
4774 remove_self_forwarding_pointers(); | |
4775 if (PrintGCDetails) { | |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
4776 gclog_or_tty->print(" (to-space overflow)"); |
342 | 4777 } else if (PrintGC) { |
4778 gclog_or_tty->print("--"); | |
4779 } | |
4780 } | |
4781 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4782 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4783 RedirtyLoggedCardTableEntryFastClosure redirty; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4784 dirty_card_queue_set().set_closure(&redirty); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4785 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
1111 | 4786 |
4787 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); | |
4788 dcq.merge_bufferlists(&dirty_card_queue_set()); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4789 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4790 } |
342 | 4791 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
4792 } | |
4793 | |
2173 | 4794 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr, |
2152 | 4795 size_t* pre_used, |
4796 FreeRegionList* free_list, | |
4797 HumongousRegionSet* humongous_proxy_set, | |
2173 | 4798 HRRSCleanupTask* hrrs_cleanup_task, |
2152 | 4799 bool par) { |
4800 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { | |
4801 if (hr->isHumongous()) { | |
4802 assert(hr->startsHumongous(), "we should only see starts humongous"); | |
4803 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par); | |
4804 } else { | |
4805 free_region(hr, pre_used, free_list, par); | |
342 | 4806 } |
2173 | 4807 } else { |
4808 hr->rem_set()->do_cleanup_work(hrrs_cleanup_task); | |
342 | 4809 } |
4810 } | |
4811 | |
2152 | 4812 void G1CollectedHeap::free_region(HeapRegion* hr, |
4813 size_t* pre_used, | |
4814 FreeRegionList* free_list, | |
4815 bool par) { | |
4816 assert(!hr->isHumongous(), "this is only for non-humongous regions"); | |
4817 assert(!hr->is_empty(), "the region should not be empty"); | |
4818 assert(free_list != NULL, "pre-condition"); | |
4819 | |
4820 *pre_used += hr->used(); | |
4821 hr->hr_clear(par, true /* clear_space */); | |
2432
455328d90876
7029458: G1: Add newly-reclaimed regions to the beginning of the region free list, not the end
tonyp
parents:
2369
diff
changeset
|
4822 free_list->add_as_head(hr); |
2152 | 4823 } |
4824 | |
4825 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, | |
4826 size_t* pre_used, | |
4827 FreeRegionList* free_list, | |
4828 HumongousRegionSet* humongous_proxy_set, | |
4829 bool par) { | |
4830 assert(hr->startsHumongous(), "this is only for starts humongous regions"); | |
4831 assert(free_list != NULL, "pre-condition"); | |
4832 assert(humongous_proxy_set != NULL, "pre-condition"); | |
4833 | |
4834 size_t hr_used = hr->used(); | |
4835 size_t hr_capacity = hr->capacity(); | |
4836 size_t hr_pre_used = 0; | |
4837 _humongous_set.remove_with_proxy(hr, humongous_proxy_set); | |
4838 hr->set_notHumongous(); | |
4839 free_region(hr, &hr_pre_used, free_list, par); | |
4840 | |
4841 int i = hr->hrs_index() + 1; | |
4842 size_t num = 1; | |
4843 while ((size_t) i < n_regions()) { | |
4844 HeapRegion* curr_hr = _hrs->at(i); | |
4845 if (!curr_hr->continuesHumongous()) { | |
4846 break; | |
4847 } | |
4848 curr_hr->set_notHumongous(); | |
4849 free_region(curr_hr, &hr_pre_used, free_list, par); | |
4850 num += 1; | |
4851 i += 1; | |
4852 } | |
4853 assert(hr_pre_used == hr_used, | |
4854 err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" " | |
4855 "should be the same", hr_pre_used, hr_used)); | |
4856 *pre_used += hr_pre_used; | |
4857 } | |
4858 | |
4859 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used, | |
4860 FreeRegionList* free_list, | |
4861 HumongousRegionSet* humongous_proxy_set, | |
4862 bool par) { | |
4863 if (pre_used > 0) { | |
4864 Mutex* lock = (par) ? ParGCRareEvent_lock : NULL; | |
342 | 4865 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); |
2152 | 4866 assert(_summary_bytes_used >= pre_used, |
4867 err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" " | |
4868 "should be >= pre_used: "SIZE_FORMAT, | |
4869 _summary_bytes_used, pre_used)); | |
342 | 4870 _summary_bytes_used -= pre_used; |
2152 | 4871 } |
4872 if (free_list != NULL && !free_list->is_empty()) { | |
4873 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | |
2432
455328d90876
7029458: G1: Add newly-reclaimed regions to the beginning of the region free list, not the end
tonyp
parents:
2369
diff
changeset
|
4874 _free_list.add_as_head(free_list); |
2152 | 4875 } |
4876 if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) { | |
4877 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); | |
4878 _humongous_set.update_from_proxy(humongous_proxy_set); | |
342 | 4879 } |
4880 } | |
4881 | |
4882 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4883 while (list != NULL) { | |
4884 guarantee( list->is_young(), "invariant" ); | |
4885 | |
4886 HeapWord* bottom = list->bottom(); | |
4887 HeapWord* end = list->end(); | |
4888 MemRegion mr(bottom, end); | |
4889 ct_bs->dirty(mr); | |
4890 | |
4891 list = list->get_next_young_region(); | |
4892 } | |
4893 } | |
4894 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4895 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4896 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4897 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4898 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4899 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4900 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4901 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4902 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4903 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4904 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4905 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4906 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4907 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4908 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4909 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4910 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4911 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4912 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4913 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4914 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4915 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4916 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4917 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4918 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4919 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4920 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4921 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4922 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4923 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4924 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4925 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4926 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4927 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4928 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4929 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4930 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4931 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4932 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4933 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4934 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4935 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4936 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4937 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4938 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4939 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4940 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4941 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4942 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4943 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4944 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4945 class G1VerifyCardTableCleanup: public HeapRegionClosure { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4946 CardTableModRefBS* _ct_bs; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4947 public: |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4948 G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4949 : _ct_bs(ct_bs) { } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4950 virtual bool doHeapRegion(HeapRegion* r) { |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4951 MemRegion mr(r->bottom(), r->end()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4952 if (r->is_survivor()) { |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4953 _ct_bs->verify_dirty_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4954 } else { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4955 _ct_bs->verify_clean_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4956 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4957 return false; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4958 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4959 }; |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4960 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4961 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4962 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4963 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4964 // We cannot guarantee that [bottom(),end()] is dirty. Threads |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4965 // dirty allocated blocks as they allocate them. The thread that |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4966 // retires each region and replaces it with a new one will do a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4967 // maximal allocation to fill in [pre_dummy_top(),end()] but will |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4968 // not dirty that area (one less thing to have to do while holding |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4969 // a lock). So we can only verify that [bottom(),pre_dummy_top()] |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4970 // is dirty. Also note that verify_dirty_region() requires |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4971 // mr.start() and mr.end() to be card aligned and pre_dummy_top() |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4972 // is not guaranteed to be. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4973 MemRegion mr(hr->bottom(), |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4974 ct_bs->align_to_card_boundary(hr->pre_dummy_top())); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4975 ct_bs->verify_dirty_region(mr); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4976 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4977 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4978 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4979 void G1CollectedHeap::verify_dirty_young_regions() { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4980 verify_dirty_young_list(_young_list->first_region()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4981 verify_dirty_young_list(_young_list->first_survivor_region()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
4982 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4983 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4984 |
342 | 4985 void G1CollectedHeap::cleanUpCardTable() { |
4986 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
4987 double start = os::elapsedTime(); | |
4988 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4989 // Iterate over the dirty cards region list. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4990 G1ParCleanupCTTask cleanup_task(ct_bs, this, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4991 _young_list->first_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4992 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4993 if (ParallelGCThreads > 0) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4994 set_par_threads(workers()->total_workers()); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4995 workers()->run_task(&cleanup_task); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4996 set_par_threads(0); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4997 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4998 while (_dirty_cards_region_list) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4999 HeapRegion* r = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5000 cleanup_task.clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5001 _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5002 if (_dirty_cards_region_list == r) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5003 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5004 _dirty_cards_region_list = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5005 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5006 r->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5007 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5008 // now, redirty the cards of the survivor regions |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5009 // (it seemed faster to do it this way, instead of iterating over |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5010 // all regions and then clearing / dirtying as appropriate) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5011 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5012 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5013 |
342 | 5014 double elapsed = os::elapsedTime() - start; |
5015 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5016 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5017 if (G1VerifyCTCleanup || VerifyAfterGC) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5018 G1VerifyCardTableCleanup cleanup_verifier(ct_bs); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5019 heap_region_iterate(&cleanup_verifier); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5020 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5021 #endif |
342 | 5022 } |
5023 | |
5024 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
2152 | 5025 size_t pre_used = 0; |
5026 FreeRegionList local_free_list("Local List for CSet Freeing"); | |
5027 | |
342 | 5028 double young_time_ms = 0.0; |
5029 double non_young_time_ms = 0.0; | |
5030 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5031 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5032 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5033 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5034 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5035 |
342 | 5036 G1CollectorPolicy* policy = g1_policy(); |
5037 | |
5038 double start_sec = os::elapsedTime(); | |
5039 bool non_young = true; | |
5040 | |
5041 HeapRegion* cur = cs_head; | |
5042 int age_bound = -1; | |
5043 size_t rs_lengths = 0; | |
5044 | |
5045 while (cur != NULL) { | |
2361 | 5046 assert(!is_on_master_free_list(cur), "sanity"); |
2152 | 5047 |
342 | 5048 if (non_young) { |
5049 if (cur->is_young()) { | |
5050 double end_sec = os::elapsedTime(); | |
5051 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5052 non_young_time_ms += elapsed_ms; | |
5053 | |
5054 start_sec = os::elapsedTime(); | |
5055 non_young = false; | |
5056 } | |
5057 } else { | |
2152 | 5058 double end_sec = os::elapsedTime(); |
5059 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5060 young_time_ms += elapsed_ms; | |
5061 | |
5062 start_sec = os::elapsedTime(); | |
5063 non_young = true; | |
342 | 5064 } |
5065 | |
5066 rs_lengths += cur->rem_set()->occupied(); | |
5067 | |
5068 HeapRegion* next = cur->next_in_collection_set(); | |
5069 assert(cur->in_collection_set(), "bad CS"); | |
5070 cur->set_next_in_collection_set(NULL); | |
5071 cur->set_in_collection_set(false); | |
5072 | |
5073 if (cur->is_young()) { | |
5074 int index = cur->young_index_in_cset(); | |
5075 guarantee( index != -1, "invariant" ); | |
5076 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
5077 size_t words_survived = _surviving_young_words[index]; | |
5078 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5079 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5080 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5081 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5082 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5083 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5084 cur->set_next_young_region(NULL); |
342 | 5085 } else { |
5086 int index = cur->young_index_in_cset(); | |
5087 guarantee( index == -1, "invariant" ); | |
5088 } | |
5089 | |
5090 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
5091 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
5092 "invariant" ); | |
5093 | |
5094 if (!cur->evacuation_failed()) { | |
5095 // And the region is empty. | |
2152 | 5096 assert(!cur->is_empty(), "Should not have empty regions in a CS."); |
5097 free_region(cur, &pre_used, &local_free_list, false /* par */); | |
342 | 5098 } else { |
5099 cur->uninstall_surv_rate_group(); | |
5100 if (cur->is_young()) | |
5101 cur->set_young_index_in_cset(-1); | |
5102 cur->set_not_young(); | |
5103 cur->set_evacuation_failed(false); | |
5104 } | |
5105 cur = next; | |
5106 } | |
5107 | |
5108 policy->record_max_rs_lengths(rs_lengths); | |
5109 policy->cset_regions_freed(); | |
5110 | |
5111 double end_sec = os::elapsedTime(); | |
5112 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5113 if (non_young) | |
5114 non_young_time_ms += elapsed_ms; | |
5115 else | |
5116 young_time_ms += elapsed_ms; | |
5117 | |
2152 | 5118 update_sets_after_freeing_regions(pre_used, &local_free_list, |
5119 NULL /* humongous_proxy_set */, | |
5120 false /* par */); | |
342 | 5121 policy->record_young_free_cset_time_ms(young_time_ms); |
5122 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
5123 } | |
5124 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5125 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5126 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5127 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5128 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5129 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5130 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5131 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5132 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5133 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5134 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5135 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5136 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5137 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5138 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5139 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5140 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5141 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5142 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5143 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5144 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5145 |
2152 | 5146 void G1CollectedHeap::set_free_regions_coming() { |
5147 if (G1ConcRegionFreeingVerbose) { | |
5148 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : " | |
5149 "setting free regions coming"); | |
5150 } | |
5151 | |
5152 assert(!free_regions_coming(), "pre-condition"); | |
5153 _free_regions_coming = true; | |
342 | 5154 } |
5155 | |
2152 | 5156 void G1CollectedHeap::reset_free_regions_coming() { |
5157 { | |
5158 assert(free_regions_coming(), "pre-condition"); | |
5159 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | |
5160 _free_regions_coming = false; | |
5161 SecondaryFreeList_lock->notify_all(); | |
5162 } | |
5163 | |
5164 if (G1ConcRegionFreeingVerbose) { | |
5165 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : " | |
5166 "reset free regions coming"); | |
342 | 5167 } |
5168 } | |
5169 | |
2152 | 5170 void G1CollectedHeap::wait_while_free_regions_coming() { |
5171 // Most of the time we won't have to wait, so let's do a quick test | |
5172 // first before we take the lock. | |
5173 if (!free_regions_coming()) { | |
5174 return; | |
5175 } | |
5176 | |
5177 if (G1ConcRegionFreeingVerbose) { | |
5178 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " | |
5179 "waiting for free regions"); | |
342 | 5180 } |
5181 | |
5182 { | |
2152 | 5183 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
5184 while (free_regions_coming()) { | |
5185 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); | |
342 | 5186 } |
2152 | 5187 } |
5188 | |
5189 if (G1ConcRegionFreeingVerbose) { | |
5190 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " | |
5191 "done waiting for free regions"); | |
5192 } | |
342 | 5193 } |
5194 | |
5195 size_t G1CollectedHeap::n_regions() { | |
5196 return _hrs->length(); | |
5197 } | |
5198 | |
5199 size_t G1CollectedHeap::max_regions() { | |
5200 return | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
5201 (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) / |
342 | 5202 HeapRegion::GrainBytes; |
5203 } | |
5204 | |
5205 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
5206 assert(heap_lock_held_for_gc(), | |
5207 "the heap lock should already be held by or for this thread"); | |
5208 _young_list->push_region(hr); | |
5209 g1_policy()->set_region_short_lived(hr); | |
5210 } | |
5211 | |
5212 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5213 private: | |
5214 bool _success; | |
5215 public: | |
5216 NoYoungRegionsClosure() : _success(true) { } | |
5217 bool doHeapRegion(HeapRegion* r) { | |
5218 if (r->is_young()) { | |
5219 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5220 r->bottom(), r->end()); | |
5221 _success = false; | |
5222 } | |
5223 return false; | |
5224 } | |
5225 bool success() { return _success; } | |
5226 }; | |
5227 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5228 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5229 bool ret = _young_list->check_list_empty(check_sample); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5230 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5231 if (check_heap) { |
342 | 5232 NoYoungRegionsClosure closure; |
5233 heap_region_iterate(&closure); | |
5234 ret = ret && closure.success(); | |
5235 } | |
5236 | |
5237 return ret; | |
5238 } | |
5239 | |
5240 void G1CollectedHeap::empty_young_list() { | |
5241 assert(heap_lock_held_for_gc(), | |
5242 "the heap lock should already be held by or for this thread"); | |
5243 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
5244 | |
5245 _young_list->empty_list(); | |
5246 } | |
5247 | |
5248 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5249 bool no_allocs = true; | |
5250 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5251 HeapRegion* r = _gc_alloc_regions[ap]; | |
5252 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5253 } | |
5254 return no_allocs; | |
5255 } | |
5256 | |
545 | 5257 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5258 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5259 HeapRegion* r = _gc_alloc_regions[ap]; | |
5260 if (r != NULL) { | |
5261 // Check for aliases. | |
5262 bool has_processed_alias = false; | |
5263 for (int i = 0; i < ap; ++i) { | |
5264 if (_gc_alloc_regions[i] == r) { | |
5265 has_processed_alias = true; | |
5266 break; | |
5267 } | |
5268 } | |
5269 if (!has_processed_alias) { | |
545 | 5270 retire_alloc_region(r, false /* par */); |
342 | 5271 } |
5272 } | |
5273 } | |
5274 } | |
5275 | |
5276 // Done at the start of full GC. | |
5277 void G1CollectedHeap::tear_down_region_lists() { | |
2152 | 5278 _free_list.remove_all(); |
342 | 5279 } |
5280 | |
5281 class RegionResetter: public HeapRegionClosure { | |
2152 | 5282 G1CollectedHeap* _g1h; |
5283 FreeRegionList _local_free_list; | |
5284 | |
342 | 5285 public: |
2152 | 5286 RegionResetter() : _g1h(G1CollectedHeap::heap()), |
5287 _local_free_list("Local Free List for RegionResetter") { } | |
5288 | |
342 | 5289 bool doHeapRegion(HeapRegion* r) { |
5290 if (r->continuesHumongous()) return false; | |
5291 if (r->top() > r->bottom()) { | |
5292 if (r->top() < r->end()) { | |
5293 Copy::fill_to_words(r->top(), | |
5294 pointer_delta(r->end(), r->top())); | |
5295 } | |
5296 } else { | |
5297 assert(r->is_empty(), "tautology"); | |
2152 | 5298 _local_free_list.add_as_tail(r); |
342 | 5299 } |
5300 return false; | |
5301 } | |
5302 | |
2152 | 5303 void update_free_lists() { |
5304 _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL, | |
5305 false /* par */); | |
5306 } | |
342 | 5307 }; |
5308 | |
5309 // Done at the end of full GC. | |
5310 void G1CollectedHeap::rebuild_region_lists() { | |
5311 // This needs to go at the end of the full GC. | |
5312 RegionResetter rs; | |
5313 heap_region_iterate(&rs); | |
2152 | 5314 rs.update_free_lists(); |
342 | 5315 } |
5316 | |
5317 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
5318 _refine_cte_cl->set_concurrent(concurrent); | |
5319 } | |
5320 | |
5321 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5322 HeapRegion* hr = heap_region_containing(p); | |
5323 if (hr == NULL) { | |
5324 return is_in_permanent(p); | |
5325 } else { | |
5326 return hr->is_in(p); | |
5327 } | |
5328 } | |
2152 | 5329 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5330 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5331 bool force) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5332 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5333 assert(!force || g1_policy()->can_expand_young_list(), |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5334 "if force is true we should be able to expand the young list"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5335 if (force || !g1_policy()->is_young_list_full()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5336 HeapRegion* new_alloc_region = new_region(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5337 false /* do_expand */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5338 if (new_alloc_region != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5339 g1_policy()->update_region_num(true /* next_is_young */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5340 set_region_short_lived_locked(new_alloc_region); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5341 return new_alloc_region; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5342 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5343 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5344 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5345 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5346 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5347 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5348 size_t allocated_bytes) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5349 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5350 assert(alloc_region->is_young(), "all mutator alloc regions should be young"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5351 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5352 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5353 _summary_bytes_used += allocated_bytes; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5354 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5355 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5356 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5357 bool force) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5358 return _g1h->new_mutator_alloc_region(word_size, force); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5359 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5360 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5361 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5362 size_t allocated_bytes) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5363 _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5364 } |
// Heap region set verification
2152 | 5368 class VerifyRegionListsClosure : public HeapRegionClosure { |
5369 private: | |
5370 HumongousRegionSet* _humongous_set; | |
5371 FreeRegionList* _free_list; | |
5372 size_t _region_count; | |
5373 | |
5374 public: | |
5375 VerifyRegionListsClosure(HumongousRegionSet* humongous_set, | |
5376 FreeRegionList* free_list) : | |
5377 _humongous_set(humongous_set), _free_list(free_list), | |
5378 _region_count(0) { } | |
5379 | |
5380 size_t region_count() { return _region_count; } | |
5381 | |
5382 bool doHeapRegion(HeapRegion* hr) { | |
5383 _region_count += 1; | |
5384 | |
5385 if (hr->continuesHumongous()) { | |
5386 return false; | |
5387 } | |
5388 | |
5389 if (hr->is_young()) { | |
5390 // TODO | |
5391 } else if (hr->startsHumongous()) { | |
5392 _humongous_set->verify_next_region(hr); | |
5393 } else if (hr->is_empty()) { | |
5394 _free_list->verify_next_region(hr); | |
5395 } | |
5396 return false; | |
5397 } | |
5398 }; | |
5399 | |
5400 void G1CollectedHeap::verify_region_sets() { | |
5401 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); | |
5402 | |
5403 // First, check the explicit lists. | |
5404 _free_list.verify(); | |
5405 { | |
5406 // Given that a concurrent operation might be adding regions to | |
5407 // the secondary free list we have to take the lock before | |
5408 // verifying it. | |
5409 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | |
5410 _secondary_free_list.verify(); | |
5411 } | |
5412 _humongous_set.verify(); | |
5413 | |
5414 // If a concurrent region freeing operation is in progress it will | |
5415 // be difficult to correctly attributed any free regions we come | |
5416 // across to the correct free list given that they might belong to | |
5417 // one of several (free_list, secondary_free_list, any local lists, | |
5418 // etc.). So, if that's the case we will skip the rest of the | |
5419 // verification operation. Alternatively, waiting for the concurrent | |
5420 // operation to complete will have a non-trivial effect on the GC's | |
5421 // operation (no concurrent operation will last longer than the | |
5422 // interval between two calls to verification) and it might hide | |
5423 // any issues that we would like to catch during testing. | |
5424 if (free_regions_coming()) { | |
5425 return; | |
5426 } | |
5427 | |
2361 | 5428 // Make sure we append the secondary_free_list on the free_list so |
5429 // that all free regions we will come across can be safely | |
5430 // attributed to the free_list. | |
5431 append_secondary_free_list_if_not_empty_with_lock(); | |
2152 | 5432 |
5433 // Finally, make sure that the region accounting in the lists is | |
5434 // consistent with what we see in the heap. | |
5435 _humongous_set.verify_start(); | |
5436 _free_list.verify_start(); | |
5437 | |
5438 VerifyRegionListsClosure cl(&_humongous_set, &_free_list); | |
5439 heap_region_iterate(&cl); | |
5440 | |
5441 _humongous_set.verify_end(); | |
5442 _free_list.verify_end(); | |
342 | 5443 } |