Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 3772:6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
Summary: Perform a heap verification after the first phase of G1's full GC using objects' mark words to determine liveness. The third parameter of the heap verification routines, which was used in G1 to determine which marking bitmap to use in liveness calculations, has been changed from a boolean to an enum with values defined for using the mark word, and the 'prev' and 'next' bitmaps.
Reviewed-by: tonyp, ysr
author | johnc |
---|---|
date | Tue, 14 Jun 2011 11:01:10 -0700 |
parents | c3f1170908be |
children | c9ca3f51cf41 |
rev | line source |
---|---|
342 | 1 /* |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "code/icBuffer.hpp" | |
27 #include "gc_implementation/g1/bufferingOopClosure.hpp" | |
28 #include "gc_implementation/g1/concurrentG1Refine.hpp" | |
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" | |
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" |
1972 | 32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | |
34 #include "gc_implementation/g1/g1MarkSweep.hpp" | |
35 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | |
36 #include "gc_implementation/g1/g1RemSet.inline.hpp" | |
37 #include "gc_implementation/g1/heapRegionRemSet.hpp" | |
38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | |
39 #include "gc_implementation/g1/vm_operations_g1.hpp" | |
40 #include "gc_implementation/shared/isGCActiveMark.hpp" | |
41 #include "memory/gcLocker.inline.hpp" | |
42 #include "memory/genOopClosures.inline.hpp" | |
43 #include "memory/generationSpec.hpp" | |
44 #include "oops/oop.inline.hpp" | |
45 #include "oops/oop.pcgc.inline.hpp" | |
46 #include "runtime/aprofiler.hpp" | |
47 #include "runtime/vmThread.hpp" | |
342 | 48 |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
49 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
50 |
342 | 51 // turn it on so that the contents of the young list (scan-only / |
52 // to-be-collected) are printed at "strategic" points before / during | |
53 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
54 #define YOUNG_LIST_VERBOSE 0 |
342 | 55 // CURRENT STATUS |
56 // This file is under construction. Search for "FIXME". | |
57 | |
58 // INVARIANTS/NOTES | |
59 // | |
60 // All allocation activity covered by the G1CollectedHeap interface is | |
1973 | 61 // serialized by acquiring the HeapLock. This happens in mem_allocate |
62 // and allocate_new_tlab, which are the "entry" points to the | |
63 // allocation code from the rest of the JVM. (Note that this does not | |
64 // apply to TLAB allocation, which is not part of this interface: it | |
65 // is done by clients of this interface.) | |
342 | 66 |
67 // Local to this file. | |
68 | |
69 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
70 SuspendibleThreadSet* _sts; | |
71 G1RemSet* _g1rs; | |
72 ConcurrentG1Refine* _cg1r; | |
73 bool _concurrent; | |
74 public: | |
75 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
76 G1RemSet* g1rs, | |
77 ConcurrentG1Refine* cg1r) : | |
78 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
79 {} | |
80 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
1705 | 81 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); |
82 // This path is executed by the concurrent refine or mutator threads, | |
83 // concurrently, and so we do not care if card_ptr contains references | |
84 // that point into the collection set. | |
85 assert(!oops_into_cset, "should be"); | |
86 | |
342 | 87 if (_concurrent && _sts->should_yield()) { |
88 // Caller will actually yield. | |
89 return false; | |
90 } | |
91 // Otherwise, we finished successfully; return true. | |
92 return true; | |
93 } | |
94 void set_concurrent(bool b) { _concurrent = b; } | |
95 }; | |
96 | |
97 | |
98 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
99 int _calls; | |
100 G1CollectedHeap* _g1h; | |
101 CardTableModRefBS* _ctbs; | |
102 int _histo[256]; | |
103 public: | |
104 ClearLoggedCardTableEntryClosure() : | |
105 _calls(0) | |
106 { | |
107 _g1h = G1CollectedHeap::heap(); | |
108 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
109 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
110 } | |
111 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
112 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
113 _calls++; | |
114 unsigned char* ujb = (unsigned char*)card_ptr; | |
115 int ind = (int)(*ujb); | |
116 _histo[ind]++; | |
117 *card_ptr = -1; | |
118 } | |
119 return true; | |
120 } | |
121 int calls() { return _calls; } | |
122 void print_histo() { | |
123 gclog_or_tty->print_cr("Card table value histogram:"); | |
124 for (int i = 0; i < 256; i++) { | |
125 if (_histo[i] != 0) { | |
126 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
127 } | |
128 } | |
129 } | |
130 }; | |
131 | |
132 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
133 int _calls; | |
134 G1CollectedHeap* _g1h; | |
135 CardTableModRefBS* _ctbs; | |
136 public: | |
137 RedirtyLoggedCardTableEntryClosure() : | |
138 _calls(0) | |
139 { | |
140 _g1h = G1CollectedHeap::heap(); | |
141 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
142 } | |
143 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
144 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
145 _calls++; | |
146 *card_ptr = 0; | |
147 } | |
148 return true; | |
149 } | |
150 int calls() { return _calls; } | |
151 }; | |
152 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
153 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
154 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
155 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
156 *card_ptr = CardTableModRefBS::dirty_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
157 return true; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
158 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
159 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
160 |
342 | 161 YoungList::YoungList(G1CollectedHeap* g1h) |
162 : _g1h(g1h), _head(NULL), | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
163 _length(0), |
342 | 164 _last_sampled_rs_lengths(0), |
545 | 165 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
342 | 166 { |
167 guarantee( check_list_empty(false), "just making sure..." ); | |
168 } | |
169 | |
170 void YoungList::push_region(HeapRegion *hr) { | |
171 assert(!hr->is_young(), "should not already be young"); | |
172 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
173 | |
174 hr->set_next_young_region(_head); | |
175 _head = hr; | |
176 | |
177 hr->set_young(); | |
178 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
179 ++_length; | |
180 } | |
181 | |
182 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 183 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 184 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
185 | |
186 hr->set_next_young_region(_survivor_head); | |
187 if (_survivor_head == NULL) { | |
545 | 188 _survivor_tail = hr; |
342 | 189 } |
190 _survivor_head = hr; | |
191 | |
192 ++_survivor_length; | |
193 } | |
194 | |
195 void YoungList::empty_list(HeapRegion* list) { | |
196 while (list != NULL) { | |
197 HeapRegion* next = list->get_next_young_region(); | |
198 list->set_next_young_region(NULL); | |
199 list->uninstall_surv_rate_group(); | |
200 list->set_not_young(); | |
201 list = next; | |
202 } | |
203 } | |
204 | |
205 void YoungList::empty_list() { | |
206 assert(check_list_well_formed(), "young list should be well formed"); | |
207 | |
208 empty_list(_head); | |
209 _head = NULL; | |
210 _length = 0; | |
211 | |
212 empty_list(_survivor_head); | |
213 _survivor_head = NULL; | |
545 | 214 _survivor_tail = NULL; |
342 | 215 _survivor_length = 0; |
216 | |
217 _last_sampled_rs_lengths = 0; | |
218 | |
219 assert(check_list_empty(false), "just making sure..."); | |
220 } | |
221 | |
222 bool YoungList::check_list_well_formed() { | |
223 bool ret = true; | |
224 | |
225 size_t length = 0; | |
226 HeapRegion* curr = _head; | |
227 HeapRegion* last = NULL; | |
228 while (curr != NULL) { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
229 if (!curr->is_young()) { |
342 | 230 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
231 "incorrectly tagged (y: %d, surv: %d)", |
342 | 232 curr->bottom(), curr->end(), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
233 curr->is_young(), curr->is_survivor()); |
342 | 234 ret = false; |
235 } | |
236 ++length; | |
237 last = curr; | |
238 curr = curr->get_next_young_region(); | |
239 } | |
240 ret = ret && (length == _length); | |
241 | |
242 if (!ret) { | |
243 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
244 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
245 length, _length); | |
246 } | |
247 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
248 return ret; |
342 | 249 } |
250 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
251 bool YoungList::check_list_empty(bool check_sample) { |
342 | 252 bool ret = true; |
253 | |
254 if (_length != 0) { | |
255 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
256 _length); | |
257 ret = false; | |
258 } | |
259 if (check_sample && _last_sampled_rs_lengths != 0) { | |
260 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
261 ret = false; | |
262 } | |
263 if (_head != NULL) { | |
264 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
265 ret = false; | |
266 } | |
267 if (!ret) { | |
268 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
269 } | |
270 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
271 return ret; |
342 | 272 } |
273 | |
274 void | |
275 YoungList::rs_length_sampling_init() { | |
276 _sampled_rs_lengths = 0; | |
277 _curr = _head; | |
278 } | |
279 | |
280 bool | |
281 YoungList::rs_length_sampling_more() { | |
282 return _curr != NULL; | |
283 } | |
284 | |
285 void | |
286 YoungList::rs_length_sampling_next() { | |
287 assert( _curr != NULL, "invariant" ); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
288 size_t rs_length = _curr->rem_set()->occupied(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
289 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
290 _sampled_rs_lengths += rs_length; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
291 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
292 // The current region may not yet have been added to the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
293 // incremental collection set (it gets added when it is |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
294 // retired as the current allocation region). |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
295 if (_curr->in_collection_set()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
296 // Update the collection set policy information for this region |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
297 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
298 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
299 |
342 | 300 _curr = _curr->get_next_young_region(); |
301 if (_curr == NULL) { | |
302 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
303 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
304 } | |
305 } | |
306 | |
307 void | |
308 YoungList::reset_auxilary_lists() { | |
309 guarantee( is_empty(), "young list should be empty" ); | |
310 assert(check_list_well_formed(), "young list should be well formed"); | |
311 | |
312 // Add survivor regions to SurvRateGroup. | |
313 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
545 | 314 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
315 |
342 | 316 for (HeapRegion* curr = _survivor_head; |
317 curr != NULL; | |
318 curr = curr->get_next_young_region()) { | |
319 _g1h->g1_policy()->set_region_survivors(curr); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
320 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
321 // The region is a non-empty survivor so let's add it to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
322 // the incremental collection set for the next evacuation |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
323 // pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
324 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); |
342 | 325 } |
326 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
327 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
328 _head = _survivor_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
329 _length = _survivor_length; |
342 | 330 if (_survivor_head != NULL) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
331 assert(_survivor_tail != NULL, "cause it shouldn't be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
332 assert(_survivor_length > 0, "invariant"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
333 _survivor_tail->set_next_young_region(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
334 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
335 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
336 // Don't clear the survivor list handles until the start of |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
337 // the next evacuation pause - we need it in order to re-tag |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
338 // the survivor regions from this evacuation pause as 'young' |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
339 // at the start of the next. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
340 |
545 | 341 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
342 | 342 |
343 assert(check_list_well_formed(), "young list should be well formed"); | |
344 } | |
345 | |
346 void YoungList::print() { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
347 HeapRegion* lists[] = {_head, _survivor_head}; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
348 const char* names[] = {"YOUNG", "SURVIVOR"}; |
342 | 349 |
350 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
351 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
352 HeapRegion *curr = lists[list]; | |
353 if (curr == NULL) | |
354 gclog_or_tty->print_cr(" empty"); | |
355 while (curr != NULL) { | |
356 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
357 "age: %4d, y: %d, surv: %d", |
342 | 358 curr->bottom(), curr->end(), |
359 curr->top(), | |
360 curr->prev_top_at_mark_start(), | |
361 curr->next_top_at_mark_start(), | |
362 curr->top_at_conc_mark_count(), | |
363 curr->age_in_surv_rate_group_cond(), | |
364 curr->is_young(), | |
365 curr->is_survivor()); | |
366 curr = curr->get_next_young_region(); | |
367 } | |
368 } | |
369 | |
370 gclog_or_tty->print_cr(""); | |
371 } | |
372 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
373 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
374 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
375 // Claim the right to put the region on the dirty cards region list |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
376 // by installing a self pointer. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
377 HeapRegion* next = hr->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
378 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
379 HeapRegion* res = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
380 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
381 NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
382 if (res == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
383 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
384 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
385 // Put the region to the dirty cards region list. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
386 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
387 next = (HeapRegion*) |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
388 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
389 if (next == head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
390 assert(hr->get_next_dirty_cards_region() == hr, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
391 "hr->get_next_dirty_cards_region() != hr"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
392 if (next == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
393 // The last region in the list points to itself. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
394 hr->set_next_dirty_cards_region(hr); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
395 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
396 hr->set_next_dirty_cards_region(next); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
397 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
398 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
399 } while (next != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
400 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
401 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
402 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
403 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
404 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
405 { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
406 HeapRegion* head; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
407 HeapRegion* hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
408 do { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
409 head = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
410 if (head == NULL) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
411 return NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
412 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
413 HeapRegion* new_head = head->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
414 if (head == new_head) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
415 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
416 new_head = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
417 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
418 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
419 head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
420 } while (hr != head); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
421 assert(hr != NULL, "invariant"); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
422 hr->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
423 return hr; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
424 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
425 |
342 | 426 void G1CollectedHeap::stop_conc_gc_threads() { |
794 | 427 _cg1r->stop(); |
342 | 428 _cmThread->stop(); |
429 } | |
430 | |
3377
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
431 #ifdef ASSERT |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
432 // A region is added to the collection set as it is retired |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
433 // so an address p can point to a region which will be in the |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
434 // collection set but has not yet been retired. This method |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
435 // therefore is only accurate during a GC pause after all |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
436 // regions have been retired. It is used for debugging |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
437 // to check if an nmethod has references to objects that can |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
438 // be move during a partial collection. Though it can be |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
439 // inaccurate, it is sufficient for G1 because the conservative |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
440 // implementation of is_scavengable() for G1 will indicate that |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
441 // all nmethods must be scanned during a partial collection. |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
442 bool G1CollectedHeap::is_in_partial_collection(const void* p) { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
443 HeapRegion* hr = heap_region_containing(p); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
444 return hr != NULL && hr->in_collection_set(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
445 } |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
446 #endif |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
447 |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
448 // Returns true if the reference points to an object that |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
449 // can move in an incremental collecction. |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
450 bool G1CollectedHeap::is_scavengable(const void* p) { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
451 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
452 G1CollectorPolicy* g1p = g1h->g1_policy(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
453 HeapRegion* hr = heap_region_containing(p); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
454 if (hr == NULL) { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
455 // perm gen (or null) |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
456 return false; |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
457 } else { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
458 return !hr->isHumongous(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
459 } |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
460 } |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
461 |
342 | 462 void G1CollectedHeap::check_ct_logs_at_safepoint() { |
463 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
464 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
465 | |
466 // Count the dirty cards at the start. | |
467 CountNonCleanMemRegionClosure count1(this); | |
468 ct_bs->mod_card_iterate(&count1); | |
469 int orig_count = count1.n(); | |
470 | |
471 // First clear the logged cards. | |
472 ClearLoggedCardTableEntryClosure clear; | |
473 dcqs.set_closure(&clear); | |
474 dcqs.apply_closure_to_all_completed_buffers(); | |
475 dcqs.iterate_closure_all_threads(false); | |
476 clear.print_histo(); | |
477 | |
478 // Now ensure that there's no dirty cards. | |
479 CountNonCleanMemRegionClosure count2(this); | |
480 ct_bs->mod_card_iterate(&count2); | |
481 if (count2.n() != 0) { | |
482 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
483 count2.n(), orig_count); | |
484 } | |
485 guarantee(count2.n() == 0, "Card table should be clean."); | |
486 | |
487 RedirtyLoggedCardTableEntryClosure redirty; | |
488 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
489 dcqs.apply_closure_to_all_completed_buffers(); | |
490 dcqs.iterate_closure_all_threads(false); | |
491 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
492 clear.calls(), orig_count); | |
493 guarantee(redirty.calls() == clear.calls(), | |
494 "Or else mechanism is broken."); | |
495 | |
496 CountNonCleanMemRegionClosure count3(this); | |
497 ct_bs->mod_card_iterate(&count3); | |
498 if (count3.n() != orig_count) { | |
499 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
500 orig_count, count3.n()); | |
501 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
502 } | |
503 | |
504 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
505 } | |
506 | |
507 // Private class members. | |
508 | |
509 G1CollectedHeap* G1CollectedHeap::_g1h; | |
510 | |
511 // Private methods. | |
512 | |
2152 | 513 HeapRegion* |
2361 | 514 G1CollectedHeap::new_region_try_secondary_free_list() { |
2152 | 515 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
516 while (!_secondary_free_list.is_empty() || free_regions_coming()) { | |
517 if (!_secondary_free_list.is_empty()) { | |
518 if (G1ConcRegionFreeingVerbose) { | |
519 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
520 "secondary_free_list has "SIZE_FORMAT" entries", | |
521 _secondary_free_list.length()); | |
522 } | |
523 // It looks as if there are free regions available on the | |
524 // secondary_free_list. Let's move them to the free_list and try | |
525 // again to allocate from it. | |
526 append_secondary_free_list(); | |
527 | |
528 assert(!_free_list.is_empty(), "if the secondary_free_list was not " | |
529 "empty we should have moved at least one entry to the free_list"); | |
530 HeapRegion* res = _free_list.remove_head(); | |
531 if (G1ConcRegionFreeingVerbose) { | |
532 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
533 "allocated "HR_FORMAT" from secondary_free_list", | |
534 HR_FORMAT_PARAMS(res)); | |
535 } | |
536 return res; | |
537 } | |
538 | |
539 // Wait here until we get notifed either when (a) there are no | |
540 // more free regions coming or (b) some regions have been moved on | |
541 // the secondary_free_list. | |
542 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); | |
543 } | |
544 | |
545 if (G1ConcRegionFreeingVerbose) { | |
546 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
547 "could not allocate from secondary_free_list"); | |
548 } | |
549 return NULL; | |
550 } | |
551 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
552 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) { |
2152 | 553 assert(!isHumongous(word_size) || |
554 word_size <= (size_t) HeapRegion::GrainWords, | |
555 "the only time we use this to allocate a humongous region is " | |
556 "when we are allocating a single humongous region"); | |
557 | |
558 HeapRegion* res; | |
559 if (G1StressConcRegionFreeing) { | |
560 if (!_secondary_free_list.is_empty()) { | |
561 if (G1ConcRegionFreeingVerbose) { | |
562 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
563 "forced to look at the secondary_free_list"); | |
564 } | |
2361 | 565 res = new_region_try_secondary_free_list(); |
2152 | 566 if (res != NULL) { |
567 return res; | |
568 } | |
569 } | |
570 } | |
571 res = _free_list.remove_head_or_null(); | |
572 if (res == NULL) { | |
573 if (G1ConcRegionFreeingVerbose) { | |
574 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
575 "res == NULL, trying the secondary_free_list"); | |
576 } | |
2361 | 577 res = new_region_try_secondary_free_list(); |
2152 | 578 } |
342 | 579 if (res == NULL && do_expand) { |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
580 if (expand(word_size * HeapWordSize)) { |
3766 | 581 // Even though the heap was expanded, it might not have reached |
582 // the desired size. So, we cannot assume that the allocation | |
583 // will succeed. | |
584 res = _free_list.remove_head_or_null(); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
585 } |
342 | 586 } |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
587 if (res != NULL) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
588 if (G1PrintHeapRegions) { |
3766 | 589 gclog_or_tty->print_cr("new alloc region "HR_FORMAT, |
590 HR_FORMAT_PARAMS(res)); | |
342 | 591 } |
592 } | |
593 return res; | |
594 } | |
595 | |
2152 | 596 HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose, |
597 size_t word_size) { | |
342 | 598 HeapRegion* alloc_region = NULL; |
599 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
600 alloc_region = new_region(word_size, true /* do_expand */); |
342 | 601 if (purpose == GCAllocForSurvived && alloc_region != NULL) { |
545 | 602 alloc_region->set_survivor(); |
342 | 603 } |
604 ++_gc_alloc_region_counts[purpose]; | |
605 } else { | |
606 g1_policy()->note_alloc_region_limit_reached(purpose); | |
607 } | |
608 return alloc_region; | |
609 } | |
610 | |
3766 | 611 size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, |
612 size_t word_size) { | |
2361 | 613 assert(isHumongous(word_size), "word_size should be humongous"); |
614 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); | |
615 | |
3766 | 616 size_t first = G1_NULL_HRS_INDEX; |
2152 | 617 if (num_regions == 1) { |
618 // Only one region to allocate, no need to go through the slower | |
619 // path. The caller will attempt the expasion if this fails, so | |
620 // let's not try to expand here too. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
621 HeapRegion* hr = new_region(word_size, false /* do_expand */); |
2152 | 622 if (hr != NULL) { |
623 first = hr->hrs_index(); | |
624 } else { | |
3766 | 625 first = G1_NULL_HRS_INDEX; |
2152 | 626 } |
627 } else { | |
628 // We can't allocate humongous regions while cleanupComplete() is | |
629 // running, since some of the regions we find to be empty might not | |
630 // yet be added to the free list and it is not straightforward to | |
631 // know which list they are on so that we can remove them. Note | |
632 // that we only need to do this if we need to allocate more than | |
633 // one region to satisfy the current humongous allocation | |
634 // request. If we are only allocating one region we use the common | |
635 // region allocation code (see above). | |
636 wait_while_free_regions_coming(); | |
2361 | 637 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 638 |
639 if (free_regions() >= num_regions) { | |
3766 | 640 first = _hrs.find_contiguous(num_regions); |
641 if (first != G1_NULL_HRS_INDEX) { | |
642 for (size_t i = first; i < first + num_regions; ++i) { | |
643 HeapRegion* hr = region_at(i); | |
2152 | 644 assert(hr->is_empty(), "sanity"); |
2361 | 645 assert(is_on_master_free_list(hr), "sanity"); |
2152 | 646 hr->set_pending_removal(true); |
647 } | |
648 _free_list.remove_all_pending(num_regions); | |
649 } | |
650 } | |
651 } | |
652 return first; | |
653 } | |
654 | |
2361 | 655 HeapWord* |
3766 | 656 G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first, |
2361 | 657 size_t num_regions, |
658 size_t word_size) { | |
3766 | 659 assert(first != G1_NULL_HRS_INDEX, "pre-condition"); |
2361 | 660 assert(isHumongous(word_size), "word_size should be humongous"); |
661 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); | |
662 | |
663 // Index of last region in the series + 1. | |
3766 | 664 size_t last = first + num_regions; |
2361 | 665 |
666 // We need to initialize the region(s) we just discovered. This is | |
667 // a bit tricky given that it can happen concurrently with | |
668 // refinement threads refining cards on these regions and | |
669 // potentially wanting to refine the BOT as they are scanning | |
670 // those cards (this can happen shortly after a cleanup; see CR | |
671 // 6991377). So we have to set up the region(s) carefully and in | |
672 // a specific order. | |
673 | |
674 // The word size sum of all the regions we will allocate. | |
675 size_t word_size_sum = num_regions * HeapRegion::GrainWords; | |
676 assert(word_size <= word_size_sum, "sanity"); | |
677 | |
678 // This will be the "starts humongous" region. | |
3766 | 679 HeapRegion* first_hr = region_at(first); |
2361 | 680 // The header of the new object will be placed at the bottom of |
681 // the first region. | |
682 HeapWord* new_obj = first_hr->bottom(); | |
683 // This will be the new end of the first region in the series that | |
684 // should also match the end of the last region in the seriers. | |
685 HeapWord* new_end = new_obj + word_size_sum; | |
686 // This will be the new top of the first region that will reflect | |
687 // this allocation. | |
688 HeapWord* new_top = new_obj + word_size; | |
689 | |
690 // First, we need to zero the header of the space that we will be | |
691 // allocating. When we update top further down, some refinement | |
692 // threads might try to scan the region. By zeroing the header we | |
693 // ensure that any thread that will try to scan the region will | |
694 // come across the zero klass word and bail out. | |
695 // | |
696 // NOTE: It would not have been correct to have used | |
697 // CollectedHeap::fill_with_object() and make the space look like | |
698 // an int array. The thread that is doing the allocation will | |
699 // later update the object header to a potentially different array | |
700 // type and, for a very short period of time, the klass and length | |
701 // fields will be inconsistent. This could cause a refinement | |
702 // thread to calculate the object size incorrectly. | |
703 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); | |
704 | |
705 // We will set up the first region as "starts humongous". This | |
706 // will also update the BOT covering all the regions to reflect | |
707 // that there is a single object that starts at the bottom of the | |
708 // first region. | |
709 first_hr->set_startsHumongous(new_top, new_end); | |
710 | |
711 // Then, if there are any, we will set up the "continues | |
712 // humongous" regions. | |
713 HeapRegion* hr = NULL; | |
3766 | 714 for (size_t i = first + 1; i < last; ++i) { |
715 hr = region_at(i); | |
2361 | 716 hr->set_continuesHumongous(first_hr); |
717 } | |
718 // If we have "continues humongous" regions (hr != NULL), then the | |
719 // end of the last one should match new_end. | |
720 assert(hr == NULL || hr->end() == new_end, "sanity"); | |
721 | |
722 // Up to this point no concurrent thread would have been able to | |
723 // do any scanning on any region in this series. All the top | |
724 // fields still point to bottom, so the intersection between | |
725 // [bottom,top] and [card_start,card_end] will be empty. Before we | |
726 // update the top fields, we'll do a storestore to make sure that | |
727 // no thread sees the update to top before the zeroing of the | |
728 // object header and the BOT initialization. | |
729 OrderAccess::storestore(); | |
730 | |
731 // Now that the BOT and the object header have been initialized, | |
732 // we can update top of the "starts humongous" region. | |
733 assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), | |
734 "new_top should be in this region"); | |
735 first_hr->set_top(new_top); | |
736 | |
737 // Now, we will update the top fields of the "continues humongous" | |
738 // regions. The reason we need to do this is that, otherwise, | |
739 // these regions would look empty and this will confuse parts of | |
740 // G1. For example, the code that looks for a consecutive number | |
741 // of empty regions will consider them empty and try to | |
742 // re-allocate them. We can extend is_empty() to also include | |
743 // !continuesHumongous(), but it is easier to just update the top | |
744 // fields here. The way we set top for all regions (i.e., top == | |
745 // end for all regions but the last one, top == new_top for the | |
746 // last one) is actually used when we will free up the humongous | |
747 // region in free_humongous_region(). | |
748 hr = NULL; | |
3766 | 749 for (size_t i = first + 1; i < last; ++i) { |
750 hr = region_at(i); | |
2361 | 751 if ((i + 1) == last) { |
752 // last continues humongous region | |
753 assert(hr->bottom() < new_top && new_top <= hr->end(), | |
754 "new_top should fall on this region"); | |
755 hr->set_top(new_top); | |
756 } else { | |
757 // not last one | |
758 assert(new_top > hr->end(), "new_top should be above this region"); | |
759 hr->set_top(hr->end()); | |
760 } | |
761 } | |
762 // If we have continues humongous regions (hr != NULL), then the | |
763 // end of the last one should match new_end and its top should | |
764 // match new_top. | |
765 assert(hr == NULL || | |
766 (hr->end() == new_end && hr->top() == new_top), "sanity"); | |
767 | |
768 assert(first_hr->used() == word_size * HeapWordSize, "invariant"); | |
769 _summary_bytes_used += first_hr->used(); | |
770 _humongous_set.add(first_hr); | |
771 | |
772 return new_obj; | |
773 } | |
774 | |
342 | 775 // If could fit into free regions w/o expansion, try. |
776 // Otherwise, if can expand, do so. | |
777 // Otherwise, if using ex regions might help, try with ex given back. | |
1973 | 778 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { |
2152 | 779 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
780 | |
781 verify_region_sets_optional(); | |
342 | 782 |
783 size_t num_regions = | |
1973 | 784 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; |
342 | 785 size_t x_size = expansion_regions(); |
3766 | 786 size_t fs = _hrs.free_suffix(); |
787 size_t first = humongous_obj_allocate_find_first(num_regions, word_size); | |
788 if (first == G1_NULL_HRS_INDEX) { | |
2152 | 789 // The only thing we can do now is attempt expansion. |
342 | 790 if (fs + x_size >= num_regions) { |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
791 // If the number of regions we're trying to allocate for this |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
792 // object is at most the number of regions in the free suffix, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
793 // then the call to humongous_obj_allocate_find_first() above |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
794 // should have succeeded and we wouldn't be here. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
795 // |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
796 // We should only be trying to expand when the free suffix is |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
797 // not sufficient for the object _and_ we have some expansion |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
798 // room available. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
799 assert(num_regions > fs, "earlier allocation should have succeeded"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
800 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
801 if (expand((num_regions - fs) * HeapRegion::GrainBytes)) { |
3766 | 802 // Even though the heap was expanded, it might not have |
803 // reached the desired size. So, we cannot assume that the | |
804 // allocation will succeed. | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
805 first = humongous_obj_allocate_find_first(num_regions, word_size); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
806 } |
2152 | 807 } |
808 } | |
809 | |
2361 | 810 HeapWord* result = NULL; |
3766 | 811 if (first != G1_NULL_HRS_INDEX) { |
2361 | 812 result = |
813 humongous_obj_allocate_initialize_regions(first, num_regions, word_size); | |
814 assert(result != NULL, "it should always return a valid result"); | |
2152 | 815 } |
816 | |
817 verify_region_sets_optional(); | |
2361 | 818 |
819 return result; | |
342 | 820 } |
821 | |
1973 | 822 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { |
823 assert_heap_not_locked_and_not_at_safepoint(); | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
824 assert(!isHumongous(word_size), "we do not allow humongous TLABs"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
825 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
826 unsigned int dummy_gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
827 return attempt_allocation(word_size, &dummy_gc_count_before); |
342 | 828 } |
829 | |
830 HeapWord* | |
831 G1CollectedHeap::mem_allocate(size_t word_size, | |
832 bool is_noref, | |
833 bool is_tlab, | |
1973 | 834 bool* gc_overhead_limit_was_exceeded) { |
835 assert_heap_not_locked_and_not_at_safepoint(); | |
836 assert(!is_tlab, "mem_allocate() this should not be called directly " | |
837 "to allocate TLABs"); | |
342 | 838 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
839 // Loop until the allocation is satisified, or unsatisfied after GC. |
1973 | 840 for (int try_count = 1; /* we'll return */; try_count += 1) { |
841 unsigned int gc_count_before; | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
842 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
843 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
844 if (!isHumongous(word_size)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
845 result = attempt_allocation(word_size, &gc_count_before); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
846 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
847 result = attempt_allocation_humongous(word_size, &gc_count_before); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
848 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
849 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
850 return result; |
342 | 851 } |
852 | |
853 // Create the garbage collection operation... | |
1973 | 854 VM_G1CollectForAllocation op(gc_count_before, word_size); |
342 | 855 // ...and get the VM thread to execute it. |
856 VMThread::execute(&op); | |
1973 | 857 |
858 if (op.prologue_succeeded() && op.pause_succeeded()) { | |
859 // If the operation was successful we'll return the result even | |
860 // if it is NULL. If the allocation attempt failed immediately | |
861 // after a Full GC, it's unlikely we'll be able to allocate now. | |
862 HeapWord* result = op.result(); | |
863 if (result != NULL && !isHumongous(word_size)) { | |
864 // Allocations that take place on VM operations do not do any | |
865 // card dirtying and we have to do it here. We only have to do | |
866 // this for non-humongous allocations, though. | |
867 dirty_young_block(result, word_size); | |
868 } | |
342 | 869 return result; |
1973 | 870 } else { |
871 assert(op.result() == NULL, | |
872 "the result should be NULL if the VM op did not succeed"); | |
342 | 873 } |
874 | |
875 // Give a warning if we seem to be looping forever. | |
876 if ((QueuedAllocationWarningCount > 0) && | |
877 (try_count % QueuedAllocationWarningCount == 0)) { | |
1973 | 878 warning("G1CollectedHeap::mem_allocate retries %d times", try_count); |
342 | 879 } |
880 } | |
1973 | 881 |
882 ShouldNotReachHere(); | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
883 return NULL; |
342 | 884 } |
885 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
886 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
887 unsigned int *gc_count_before_ret) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
888 // Make sure you read the note in attempt_allocation_humongous(). |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
889 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
890 assert_heap_not_locked_and_not_at_safepoint(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
891 assert(!isHumongous(word_size), "attempt_allocation_slow() should not " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
892 "be called for humongous allocation requests"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
893 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
894 // We should only get here after the first-level allocation attempt |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
895 // (attempt_allocation()) failed to allocate. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
896 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
897 // We will loop until a) we manage to successfully perform the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
898 // allocation or b) we successfully schedule a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
899 // fails to perform the allocation. b) is the only case when we'll |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
900 // return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
901 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
902 for (int try_count = 1; /* we'll return */; try_count += 1) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
903 bool should_try_gc; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
904 unsigned int gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
905 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
906 { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
907 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
908 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
909 result = _mutator_alloc_region.attempt_allocation_locked(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
910 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
911 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
912 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
913 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
914 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
915 // If we reach here, attempt_allocation_locked() above failed to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
916 // allocate a new region. So the mutator alloc region should be NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
917 assert(_mutator_alloc_region.get() == NULL, "only way to get here"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
918 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
919 if (GC_locker::is_active_and_needs_gc()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
920 if (g1_policy()->can_expand_young_list()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
921 result = _mutator_alloc_region.attempt_allocation_force(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
922 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
923 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
924 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
925 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
926 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
927 should_try_gc = false; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
928 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
929 // Read the GC count while still holding the Heap_lock. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
930 gc_count_before = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
931 should_try_gc = true; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
932 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
933 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
934 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
935 if (should_try_gc) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
936 bool succeeded; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
937 result = do_collection_pause(word_size, gc_count_before, &succeeded); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
938 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
939 assert(succeeded, "only way to get back a non-NULL result"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
940 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
941 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
942 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
943 if (succeeded) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
944 // If we get here we successfully scheduled a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
945 // failed to allocate. No point in trying to allocate |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
946 // further. We'll just return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
947 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
948 *gc_count_before_ret = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
949 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
950 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
951 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
952 GC_locker::stall_until_clear(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
953 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
954 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
955 // We can reach here if we were unsuccessul in scheduling a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
956 // collection (because another thread beat us to it) or if we were |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
957 // stalled due to the GC locker. In either can we should retry the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
958 // allocation attempt in case another thread successfully |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
959 // performed a collection and reclaimed enough space. We do the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
960 // first attempt (without holding the Heap_lock) here and the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
961 // follow-on attempt will be at the start of the next loop |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
962 // iteration (after taking the Heap_lock). |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
963 result = _mutator_alloc_region.attempt_allocation(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
964 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
965 if (result != NULL ){ |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
966 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
967 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
968 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
969 // Give a warning if we seem to be looping forever. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
970 if ((QueuedAllocationWarningCount > 0) && |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
971 (try_count % QueuedAllocationWarningCount == 0)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
972 warning("G1CollectedHeap::attempt_allocation_slow() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
973 "retries %d times", try_count); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
974 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
975 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
976 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
977 ShouldNotReachHere(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
978 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
979 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
980 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
981 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
982 unsigned int * gc_count_before_ret) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
983 // The structure of this method has a lot of similarities to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
984 // attempt_allocation_slow(). The reason these two were not merged |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
985 // into a single one is that such a method would require several "if |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
986 // allocation is not humongous do this, otherwise do that" |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
987 // conditional paths which would obscure its flow. In fact, an early |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
988 // version of this code did use a unified method which was harder to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
989 // follow and, as a result, it had subtle bugs that were hard to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
990 // track down. So keeping these two methods separate allows each to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
991 // be more readable. It will be good to keep these two in sync as |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
992 // much as possible. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
993 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
994 assert_heap_not_locked_and_not_at_safepoint(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
995 assert(isHumongous(word_size), "attempt_allocation_humongous() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
996 "should only be called for humongous allocations"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
997 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
998 // We will loop until a) we manage to successfully perform the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
999 // allocation or b) we successfully schedule a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1000 // fails to perform the allocation. b) is the only case when we'll |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1001 // return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1002 HeapWord* result = NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1003 for (int try_count = 1; /* we'll return */; try_count += 1) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1004 bool should_try_gc; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1005 unsigned int gc_count_before; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1006 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1007 { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1008 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1009 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1010 // Given that humongous objects are not allocated in young |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1011 // regions, we'll first try to do the allocation without doing a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1012 // collection hoping that there's enough space in the heap. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1013 result = humongous_obj_allocate(word_size); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1014 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1015 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1016 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1017 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1018 if (GC_locker::is_active_and_needs_gc()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1019 should_try_gc = false; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1020 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1021 // Read the GC count while still holding the Heap_lock. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1022 gc_count_before = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1023 should_try_gc = true; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1024 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1025 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1026 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1027 if (should_try_gc) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1028 // If we failed to allocate the humongous object, we should try to |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1029 // do a collection pause (if we're allowed) in case it reclaims |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1030 // enough space for the allocation to succeed after the pause. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1031 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1032 bool succeeded; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1033 result = do_collection_pause(word_size, gc_count_before, &succeeded); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1034 if (result != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1035 assert(succeeded, "only way to get back a non-NULL result"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1036 return result; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1037 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1038 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1039 if (succeeded) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1040 // If we get here we successfully scheduled a collection which |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1041 // failed to allocate. No point in trying to allocate |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1042 // further. We'll just return NULL. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1043 MutexLockerEx x(Heap_lock); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1044 *gc_count_before_ret = SharedHeap::heap()->total_collections(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1045 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1046 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1047 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1048 GC_locker::stall_until_clear(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1049 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1050 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1051 // We can reach here if we were unsuccessul in scheduling a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1052 // collection (because another thread beat us to it) or if we were |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1053 // stalled due to the GC locker. In either can we should retry the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1054 // allocation attempt in case another thread successfully |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1055 // performed a collection and reclaimed enough space. Give a |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1056 // warning if we seem to be looping forever. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1057 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1058 if ((QueuedAllocationWarningCount > 0) && |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1059 (try_count % QueuedAllocationWarningCount == 0)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1060 warning("G1CollectedHeap::attempt_allocation_humongous() " |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1061 "retries %d times", try_count); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1062 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1063 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1064 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1065 ShouldNotReachHere(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1066 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1067 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1068 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1069 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1070 bool expect_null_mutator_alloc_region) { |
2152 | 1071 assert_at_safepoint(true /* should_be_vm_thread */); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1072 assert(_mutator_alloc_region.get() == NULL || |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1073 !expect_null_mutator_alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1074 "the current alloc region was unexpectedly found to be non-NULL"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1075 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1076 if (!isHumongous(word_size)) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1077 return _mutator_alloc_region.attempt_allocation_locked(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1078 false /* bot_updates */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1079 } else { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1080 return humongous_obj_allocate(word_size); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1081 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1082 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1083 ShouldNotReachHere(); |
342 | 1084 } |
1085 | |
636 | 1086 void G1CollectedHeap::abandon_gc_alloc_regions() { |
1087 // first, make sure that the GC alloc region list is empty (it should!) | |
1088 assert(_gc_alloc_region_list == NULL, "invariant"); | |
1089 release_gc_alloc_regions(true /* totally */); | |
1090 } | |
1091 | |
342 | 1092 class PostMCRemSetClearClosure: public HeapRegionClosure { |
1093 ModRefBarrierSet* _mr_bs; | |
1094 public: | |
1095 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1096 bool doHeapRegion(HeapRegion* r) { | |
1097 r->reset_gc_time_stamp(); | |
1098 if (r->continuesHumongous()) | |
1099 return false; | |
1100 HeapRegionRemSet* hrrs = r->rem_set(); | |
1101 if (hrrs != NULL) hrrs->clear(); | |
1102 // You might think here that we could clear just the cards | |
1103 // corresponding to the used region. But no: if we leave a dirty card | |
1104 // in a region we might allocate into, then it would prevent that card | |
1105 // from being enqueued, and cause it to be missed. | |
1106 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
1107 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
1108 return false; | |
1109 } | |
1110 }; | |
1111 | |
1112 | |
1113 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
1114 ModRefBarrierSet* _mr_bs; | |
1115 public: | |
1116 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1117 bool doHeapRegion(HeapRegion* r) { | |
1118 if (r->continuesHumongous()) return false; | |
1119 if (r->used_region().word_size() != 0) { | |
1120 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
1121 } | |
1122 return false; | |
1123 } | |
1124 }; | |
1125 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1126 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1127 G1CollectedHeap* _g1h; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1128 UpdateRSOopClosure _cl; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1129 int _worker_i; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1130 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1131 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
1861 | 1132 _cl(g1->g1_rem_set(), worker_i), |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1133 _worker_i(worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1134 _g1h(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1135 { } |
1960
878b57474103
6978187: G1: assert(ParallelGCThreads> 1 || n_yielded() == _hrrs->occupied()) strikes again
johnc
parents:
1883
diff
changeset
|
1136 |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1137 bool doHeapRegion(HeapRegion* r) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1138 if (!r->continuesHumongous()) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1139 _cl.set_from(r); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1140 r->oop_iterate(&_cl); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1141 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1142 return false; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1143 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1144 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1145 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1146 class ParRebuildRSTask: public AbstractGangTask { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1147 G1CollectedHeap* _g1; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1148 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1149 ParRebuildRSTask(G1CollectedHeap* g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1150 : AbstractGangTask("ParRebuildRSTask"), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1151 _g1(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1152 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1153 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1154 void work(int i) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1155 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1156 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1157 HeapRegion::RebuildRSClaimValue); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1158 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1159 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1160 |
1973 | 1161 bool G1CollectedHeap::do_collection(bool explicit_gc, |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1162 bool clear_all_soft_refs, |
342 | 1163 size_t word_size) { |
2152 | 1164 assert_at_safepoint(true /* should_be_vm_thread */); |
1165 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1166 if (GC_locker::check_active_before_gc()) { |
1973 | 1167 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1168 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1169 |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2039
diff
changeset
|
1170 SvcGCMarker sgcm(SvcGCMarker::FULL); |
342 | 1171 ResourceMark rm; |
1172 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1173 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1174 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1175 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1176 |
2152 | 1177 verify_region_sets_optional(); |
342 | 1178 |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1179 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1180 collector_policy()->should_clear_all_soft_refs(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1181 |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1182 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1183 |
342 | 1184 { |
1185 IsGCActiveMark x; | |
1186 | |
1187 // Timing | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1188 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1189 assert(!system_gc || explicit_gc, "invariant"); |
342 | 1190 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
1191 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1192 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1193 PrintGC, true, gclog_or_tty); |
342 | 1194 |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
1195 TraceCollectorStats tcs(g1mm()->full_collection_counters()); |
3356
78542e2b5e35
7036199: Adding a notification to the implementation of GarbageCollectorMXBeans
fparain
parents:
3323
diff
changeset
|
1196 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1197 |
342 | 1198 double start = os::elapsedTime(); |
1199 g1_policy()->record_full_collection_start(); | |
1200 | |
2152 | 1201 wait_while_free_regions_coming(); |
2361 | 1202 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 1203 |
342 | 1204 gc_prologue(true); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1205 increment_total_collections(true /* full gc */); |
342 | 1206 |
1207 size_t g1h_prev_used = used(); | |
1208 assert(used() == recalculate_used(), "Should be equal"); | |
1209 | |
1210 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
1211 HandleMark hm; // Discard invalid handles created during verification | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1212 gclog_or_tty->print(" VerifyBeforeGC:"); |
342 | 1213 prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1214 Universe::verify(/* allow dirty */ true, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1215 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1216 /* option */ VerifyOption_G1UsePrevMarking); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1217 |
342 | 1218 } |
1219 | |
1220 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
1221 | |
1222 // We want to discover references, but not process them yet. | |
1223 // This mode is disabled in | |
1224 // instanceRefKlass::process_discovered_references if the | |
1225 // generation does some collection work, or | |
1226 // instanceRefKlass::enqueue_discovered_references if the | |
1227 // generation returns without doing any work. | |
1228 ref_processor()->disable_discovery(); | |
1229 ref_processor()->abandon_partial_discovery(); | |
1230 ref_processor()->verify_no_references_recorded(); | |
1231 | |
1232 // Abandon current iterations of concurrent marking and concurrent | |
1233 // refinement, if any are in progress. | |
1234 concurrent_mark()->abort(); | |
1235 | |
1236 // Make sure we'll choose a new allocation region afterwards. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1237 release_mutator_alloc_region(); |
636 | 1238 abandon_gc_alloc_regions(); |
1861 | 1239 g1_rem_set()->cleanupHRRS(); |
342 | 1240 tear_down_region_lists(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1241 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1242 // We may have added regions to the current incremental collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1243 // set between the last GC or pause and now. We need to clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1244 // incremental collection set and then start rebuilding it afresh |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1245 // after this full GC. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1246 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1247 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1248 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1249 |
342 | 1250 if (g1_policy()->in_young_gc_mode()) { |
1251 empty_young_list(); | |
1252 g1_policy()->set_full_young_gcs(true); | |
1253 } | |
1254 | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1255 // See the comment in G1CollectedHeap::ref_processing_init() about |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1256 // how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1257 |
342 | 1258 // Temporarily make reference _discovery_ single threaded (non-MT). |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
1259 ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false); |
342 | 1260 |
1261 // Temporarily make refs discovery atomic | |
1262 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
1263 | |
1264 // Temporarily clear _is_alive_non_header | |
1265 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
1266 | |
1267 ref_processor()->enable_discovery(); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1268 ref_processor()->setup_policy(do_clear_all_soft_refs); |
342 | 1269 // Do collection work |
1270 { | |
1271 HandleMark hm; // Discard invalid handles created during gc | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1272 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); |
342 | 1273 } |
2152 | 1274 assert(free_regions() == 0, "we should not have added any free regions"); |
342 | 1275 rebuild_region_lists(); |
1276 | |
1277 _summary_bytes_used = recalculate_used(); | |
1278 | |
1279 ref_processor()->enqueue_discovered_references(); | |
1280 | |
1281 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
1282 | |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1283 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1284 |
342 | 1285 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
1286 HandleMark hm; // Discard invalid handles created during verification | |
1287 gclog_or_tty->print(" VerifyAfterGC:"); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
1288 prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1289 Universe::verify(/* allow dirty */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1290 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1291 /* option */ VerifyOption_G1UsePrevMarking); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
1292 |
342 | 1293 } |
1294 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
1295 | |
1296 reset_gc_time_stamp(); | |
1297 // Since everything potentially moved, we will clear all remembered | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1298 // sets, and clear all cards. Later we will rebuild remebered |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1299 // sets. We will also reset the GC time stamps of the regions. |
342 | 1300 PostMCRemSetClearClosure rs_clear(mr_bs()); |
1301 heap_region_iterate(&rs_clear); | |
1302 | |
1303 // Resize the heap if necessary. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1304 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); |
342 | 1305 |
1306 if (_cg1r->use_cache()) { | |
1307 _cg1r->clear_and_record_card_counts(); | |
1308 _cg1r->clear_hot_cache(); | |
1309 } | |
1310 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1311 // Rebuild remembered sets of all regions. |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1312 |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1313 if (G1CollectedHeap::use_parallel_gc_threads()) { |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1314 ParRebuildRSTask rebuild_rs_task(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1315 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1316 HeapRegion::InitialClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1317 set_par_threads(workers()->total_workers()); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1318 workers()->run_task(&rebuild_rs_task); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1319 set_par_threads(0); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1320 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1321 HeapRegion::RebuildRSClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1322 reset_heap_region_claim_values(); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1323 } else { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1324 RebuildRSOutOfRegionClosure rebuild_rs(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1325 heap_region_iterate(&rebuild_rs); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1326 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1327 |
342 | 1328 if (PrintGC) { |
1329 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
1330 } | |
1331 | |
1332 if (true) { // FIXME | |
1333 // Ask the permanent generation to adjust size for full collections | |
1334 perm()->compute_new_size(); | |
1335 } | |
1336 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1337 // Start a new incremental collection set for the next pause |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1338 assert(g1_policy()->collection_set() == NULL, "must be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1339 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1340 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1341 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1342 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1343 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1344 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1345 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1346 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1347 |
342 | 1348 double end = os::elapsedTime(); |
1349 g1_policy()->record_full_collection_end(); | |
1350 | |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1351 #ifdef TRACESPINNING |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1352 ParallelTaskTerminator::print_termination_counts(); |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1353 #endif |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1354 |
342 | 1355 gc_epilogue(true); |
1356 | |
794 | 1357 // Discard all rset updates |
1358 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1359 assert(!G1DeferredRSUpdate |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1360 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
342 | 1361 } |
1362 | |
1363 if (g1_policy()->in_young_gc_mode()) { | |
1364 _young_list->reset_sampled_info(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1365 // At this point there should be no regions in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1366 // entire heap tagged as young. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1367 assert( check_young_list_empty(true /* check_heap */), |
342 | 1368 "young list should be empty at this point"); |
1369 } | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1370 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1371 // Update the number of full collections that have been completed. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
1372 increment_full_collections_completed(false /* concurrent */); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1373 |
3766 | 1374 _hrs.verify_optional(); |
2152 | 1375 verify_region_sets_optional(); |
1376 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1377 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1378 Universe::print_heap_after_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1379 } |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
1380 g1mm()->update_counters(); |
1973 | 1381 |
1382 return true; | |
342 | 1383 } |
1384 | |
1385 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1973 | 1386 // do_collection() will return whether it succeeded in performing |
1387 // the GC. Currently, there is no facility on the | |
1388 // do_full_collection() API to notify the caller than the collection | |
1389 // did not succeed (e.g., because it was locked out by the GC | |
1390 // locker). So, right now, we'll ignore the return value. | |
1391 bool dummy = do_collection(true, /* explicit_gc */ | |
1392 clear_all_soft_refs, | |
1393 0 /* word_size */); | |
342 | 1394 } |
1395 | |
1396 // This code is mostly copied from TenuredGeneration. | |
1397 void | |
1398 G1CollectedHeap:: | |
1399 resize_if_necessary_after_full_collection(size_t word_size) { | |
1400 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1401 | |
1402 // Include the current allocation, if any, and bytes that will be | |
1403 // pre-allocated to support collections, as "used". | |
1404 const size_t used_after_gc = used(); | |
1405 const size_t capacity_after_gc = capacity(); | |
1406 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1407 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1408 // This is enforced in arguments.cpp. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1409 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1410 "otherwise the code below doesn't make sense"); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1411 |
342 | 1412 // We don't have floating point command-line arguments |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1413 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; |
342 | 1414 const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1415 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; |
342 | 1416 const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
1417 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1418 const size_t min_heap_size = collector_policy()->min_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1419 const size_t max_heap_size = collector_policy()->max_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1420 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1421 // We have to be careful here as these two calculations can overflow |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1422 // 32-bit size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1423 double used_after_gc_d = (double) used_after_gc; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1424 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1425 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1426 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1427 // Let's make sure that they are both under the max heap size, which |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1428 // by default will make them fit into a size_t. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1429 double desired_capacity_upper_bound = (double) max_heap_size; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1430 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1431 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1432 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1433 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1434 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1435 // We can now safely turn them into size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1436 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1437 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1438 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1439 // This assert only makes sense here, before we adjust them |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1440 // with respect to the min and max heap size. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1441 assert(minimum_desired_capacity <= maximum_desired_capacity, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1442 err_msg("minimum_desired_capacity = "SIZE_FORMAT", " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1443 "maximum_desired_capacity = "SIZE_FORMAT, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1444 minimum_desired_capacity, maximum_desired_capacity)); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1445 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1446 // Should not be greater than the heap max size. No need to adjust |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1447 // it with respect to the heap min size as it's a lower bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1448 // we'll try to make the capacity larger than it, not smaller). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1449 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1450 // Should not be less than the heap min size. No need to adjust it |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1451 // with respect to the heap max size as it's an upper bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1452 // we'll try to make the capacity smaller than it, not greater). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1453 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); |
342 | 1454 |
1455 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1456 const double free_percentage = |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1457 (double) free_after_gc / (double) capacity_after_gc; |
342 | 1458 gclog_or_tty->print_cr("Computing new size after full GC "); |
1459 gclog_or_tty->print_cr(" " | |
1460 " minimum_free_percentage: %6.2f", | |
1461 minimum_free_percentage); | |
1462 gclog_or_tty->print_cr(" " | |
1463 " maximum_free_percentage: %6.2f", | |
1464 maximum_free_percentage); | |
1465 gclog_or_tty->print_cr(" " | |
1466 " capacity: %6.1fK" | |
1467 " minimum_desired_capacity: %6.1fK" | |
1468 " maximum_desired_capacity: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1469 (double) capacity_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1470 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1471 (double) maximum_desired_capacity / (double) K); |
342 | 1472 gclog_or_tty->print_cr(" " |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1473 " free_after_gc: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1474 " used_after_gc: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1475 (double) free_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1476 (double) used_after_gc / (double) K); |
342 | 1477 gclog_or_tty->print_cr(" " |
1478 " free_percentage: %6.2f", | |
1479 free_percentage); | |
1480 } | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1481 if (capacity_after_gc < minimum_desired_capacity) { |
342 | 1482 // Don't expand unless it's significant |
1483 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1484 if (expand(expand_bytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1485 if (PrintGC && Verbose) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1486 gclog_or_tty->print_cr(" " |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1487 " expanding:" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1488 " max_heap_size: %6.1fK" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1489 " minimum_desired_capacity: %6.1fK" |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1490 " expand_bytes: %6.1fK", |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1491 (double) max_heap_size / (double) K, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1492 (double) minimum_desired_capacity / (double) K, |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1493 (double) expand_bytes / (double) K); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1494 } |
342 | 1495 } |
1496 | |
1497 // No expansion, now see if we want to shrink | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1498 } else if (capacity_after_gc > maximum_desired_capacity) { |
342 | 1499 // Capacity too large, compute shrinking size |
1500 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1501 shrink(shrink_bytes); | |
1502 if (PrintGC && Verbose) { | |
1503 gclog_or_tty->print_cr(" " | |
1504 " shrinking:" | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1505 " min_heap_size: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1506 " maximum_desired_capacity: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1507 " shrink_bytes: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1508 (double) min_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1509 (double) maximum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1510 (double) shrink_bytes / (double) K); |
342 | 1511 } |
1512 } | |
1513 } | |
1514 | |
1515 | |
1516 HeapWord* | |
1973 | 1517 G1CollectedHeap::satisfy_failed_allocation(size_t word_size, |
1518 bool* succeeded) { | |
2152 | 1519 assert_at_safepoint(true /* should_be_vm_thread */); |
1973 | 1520 |
1521 *succeeded = true; | |
1522 // Let's attempt the allocation first. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1523 HeapWord* result = |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1524 attempt_allocation_at_safepoint(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1525 false /* expect_null_mutator_alloc_region */); |
1973 | 1526 if (result != NULL) { |
1527 assert(*succeeded, "sanity"); | |
1528 return result; | |
1529 } | |
342 | 1530 |
1531 // In a G1 heap, we're supposed to keep allocation from failing by | |
1532 // incremental pauses. Therefore, at least for now, we'll favor | |
1533 // expansion over collection. (This might change in the future if we can | |
1534 // do something smarter than full collection to satisfy a failed alloc.) | |
1535 result = expand_and_allocate(word_size); | |
1536 if (result != NULL) { | |
1973 | 1537 assert(*succeeded, "sanity"); |
342 | 1538 return result; |
1539 } | |
1540 | |
1973 | 1541 // Expansion didn't work, we'll try to do a Full GC. |
1542 bool gc_succeeded = do_collection(false, /* explicit_gc */ | |
1543 false, /* clear_all_soft_refs */ | |
1544 word_size); | |
1545 if (!gc_succeeded) { | |
1546 *succeeded = false; | |
1547 return NULL; | |
1548 } | |
1549 | |
1550 // Retry the allocation | |
1551 result = attempt_allocation_at_safepoint(word_size, | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1552 true /* expect_null_mutator_alloc_region */); |
342 | 1553 if (result != NULL) { |
1973 | 1554 assert(*succeeded, "sanity"); |
342 | 1555 return result; |
1556 } | |
1557 | |
1973 | 1558 // Then, try a Full GC that will collect all soft references. |
1559 gc_succeeded = do_collection(false, /* explicit_gc */ | |
1560 true, /* clear_all_soft_refs */ | |
1561 word_size); | |
1562 if (!gc_succeeded) { | |
1563 *succeeded = false; | |
1564 return NULL; | |
1565 } | |
1566 | |
1567 // Retry the allocation once more | |
1568 result = attempt_allocation_at_safepoint(word_size, | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1569 true /* expect_null_mutator_alloc_region */); |
342 | 1570 if (result != NULL) { |
1973 | 1571 assert(*succeeded, "sanity"); |
342 | 1572 return result; |
1573 } | |
1574 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1575 assert(!collector_policy()->should_clear_all_soft_refs(), |
1973 | 1576 "Flag should have been handled and cleared prior to this point"); |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1577 |
342 | 1578 // What else? We might try synchronous finalization later. If the total |
1579 // space available is large enough for the allocation, then a more | |
1580 // complete compaction phase than we've tried so far might be | |
1581 // appropriate. | |
1973 | 1582 assert(*succeeded, "sanity"); |
342 | 1583 return NULL; |
1584 } | |
1585 | |
1586 // Attempting to expand the heap sufficiently | |
1587 // to support an allocation of the given "word_size". If | |
1588 // successful, perform the allocation and return the address of the | |
1589 // allocated block, or else "NULL". | |
1590 | |
1591 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
2152 | 1592 assert_at_safepoint(true /* should_be_vm_thread */); |
1593 | |
1594 verify_region_sets_optional(); | |
1973 | 1595 |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1596 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1597 if (expand(expand_bytes)) { |
3766 | 1598 _hrs.verify_optional(); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1599 verify_region_sets_optional(); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1600 return attempt_allocation_at_safepoint(word_size, |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1601 false /* expect_null_mutator_alloc_region */); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1602 } |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1603 return NULL; |
342 | 1604 } |
1605 | |
3766 | 1606 void G1CollectedHeap::update_committed_space(HeapWord* old_end, |
1607 HeapWord* new_end) { | |
1608 assert(old_end != new_end, "don't call this otherwise"); | |
1609 assert((HeapWord*) _g1_storage.high() == new_end, "invariant"); | |
1610 | |
1611 // Update the committed mem region. | |
1612 _g1_committed.set_end(new_end); | |
1613 // Tell the card table about the update. | |
1614 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1615 // Tell the BOT about the update. | |
1616 _bot_shared->resize(_g1_committed.word_size()); | |
1617 } | |
1618 | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1619 bool G1CollectedHeap::expand(size_t expand_bytes) { |
342 | 1620 size_t old_mem_size = _g1_storage.committed_size(); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1621 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); |
342 | 1622 aligned_expand_bytes = align_size_up(aligned_expand_bytes, |
1623 HeapRegion::GrainBytes); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1624 |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1625 if (Verbose && PrintGC) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1626 gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK", |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1627 old_mem_size/K, aligned_expand_bytes/K); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1628 } |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1629 |
3766 | 1630 // First commit the memory. |
1631 HeapWord* old_end = (HeapWord*) _g1_storage.high(); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1632 bool successful = _g1_storage.expand_by(aligned_expand_bytes); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1633 if (successful) { |
3766 | 1634 // Then propagate this update to the necessary data structures. |
1635 HeapWord* new_end = (HeapWord*) _g1_storage.high(); | |
1636 update_committed_space(old_end, new_end); | |
1637 | |
1638 FreeRegionList expansion_list("Local Expansion List"); | |
1639 MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list); | |
1640 assert(mr.start() == old_end, "post-condition"); | |
1641 // mr might be a smaller region than what was requested if | |
1642 // expand_by() was unable to allocate the HeapRegion instances | |
1643 assert(mr.end() <= new_end, "post-condition"); | |
1644 | |
1645 size_t actual_expand_bytes = mr.byte_size(); | |
1646 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition"); | |
1647 assert(actual_expand_bytes == expansion_list.total_capacity_bytes(), | |
1648 "post-condition"); | |
1649 if (actual_expand_bytes < aligned_expand_bytes) { | |
1650 // We could not expand _hrs to the desired size. In this case we | |
1651 // need to shrink the committed space accordingly. | |
1652 assert(mr.end() < new_end, "invariant"); | |
1653 | |
1654 size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes; | |
1655 // First uncommit the memory. | |
1656 _g1_storage.shrink_by(diff_bytes); | |
1657 // Then propagate this update to the necessary data structures. | |
1658 update_committed_space(new_end, mr.end()); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1659 } |
3766 | 1660 _free_list.add_as_tail(&expansion_list); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1661 } else { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1662 // The expansion of the virtual storage space was unsuccessful. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1663 // Let's see if it was because we ran out of swap. |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1664 if (G1ExitOnExpansionFailure && |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1665 _g1_storage.uncommitted_size() >= aligned_expand_bytes) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1666 // We had head room... |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1667 vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion"); |
342 | 1668 } |
1669 } | |
2152 | 1670 |
342 | 1671 if (Verbose && PrintGC) { |
1672 size_t new_mem_size = _g1_storage.committed_size(); | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1673 gclog_or_tty->print_cr("...%s, expanded to %ldK", |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1674 (successful ? "Successful" : "Failed"), |
342 | 1675 new_mem_size/K); |
1676 } | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1677 return successful; |
342 | 1678 } |
1679 | |
3766 | 1680 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { |
342 | 1681 size_t old_mem_size = _g1_storage.committed_size(); |
1682 size_t aligned_shrink_bytes = | |
1683 ReservedSpace::page_align_size_down(shrink_bytes); | |
1684 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1685 HeapRegion::GrainBytes); | |
1686 size_t num_regions_deleted = 0; | |
3766 | 1687 MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted); |
1688 HeapWord* old_end = (HeapWord*) _g1_storage.high(); | |
1689 assert(mr.end() == old_end, "post-condition"); | |
1690 if (mr.byte_size() > 0) { | |
342 | 1691 _g1_storage.shrink_by(mr.byte_size()); |
3766 | 1692 HeapWord* new_end = (HeapWord*) _g1_storage.high(); |
1693 assert(mr.start() == new_end, "post-condition"); | |
1694 | |
1695 _expansion_regions += num_regions_deleted; | |
1696 update_committed_space(old_end, new_end); | |
1697 HeapRegionRemSet::shrink_heap(n_regions()); | |
1698 | |
1699 if (Verbose && PrintGC) { | |
1700 size_t new_mem_size = _g1_storage.committed_size(); | |
1701 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1702 old_mem_size/K, aligned_shrink_bytes/K, | |
1703 new_mem_size/K); | |
1704 } | |
342 | 1705 } |
1706 } | |
1707 | |
1708 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
2152 | 1709 verify_region_sets_optional(); |
1710 | |
636 | 1711 release_gc_alloc_regions(true /* totally */); |
2152 | 1712 // Instead of tearing down / rebuilding the free lists here, we |
1713 // could instead use the remove_all_pending() method on free_list to | |
1714 // remove only the ones that we need to remove. | |
342 | 1715 tear_down_region_lists(); // We will rebuild them in a moment. |
1716 shrink_helper(shrink_bytes); | |
1717 rebuild_region_lists(); | |
2152 | 1718 |
3766 | 1719 _hrs.verify_optional(); |
2152 | 1720 verify_region_sets_optional(); |
342 | 1721 } |
1722 | |
1723 // Public methods. | |
1724 | |
1725 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1726 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1727 #endif // _MSC_VER | |
1728 | |
1729 | |
1730 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1731 SharedHeap(policy_), | |
1732 _g1_policy(policy_), | |
1111 | 1733 _dirty_card_queue_set(false), |
1705 | 1734 _into_cset_dirty_card_queue_set(false), |
2037
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
2030
diff
changeset
|
1735 _is_alive_closure(this), |
342 | 1736 _ref_processor(NULL), |
1737 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1738 _bot_shared(NULL), | |
1739 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1740 _evac_failure_scan_stack(NULL) , | |
1741 _mark_in_progress(false), | |
2152 | 1742 _cg1r(NULL), _summary_bytes_used(0), |
342 | 1743 _refine_cte_cl(NULL), |
1744 _full_collection(false), | |
2152 | 1745 _free_list("Master Free List"), |
1746 _secondary_free_list("Secondary Free List"), | |
1747 _humongous_set("Master Humongous Set"), | |
1748 _free_regions_coming(false), | |
342 | 1749 _young_list(new YoungList(this)), |
1750 _gc_time_stamp(0), | |
526 | 1751 _surviving_young_words(NULL), |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1752 _full_collections_completed(0), |
526 | 1753 _in_cset_fast_test(NULL), |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1754 _in_cset_fast_test_base(NULL), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1755 _dirty_cards_region_list(NULL) { |
342 | 1756 _g1h = this; // To catch bugs. |
1757 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1758 vm_exit_during_initialization("Failed necessary allocation."); | |
1759 } | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1760 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1761 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1762 |
342 | 1763 int n_queues = MAX2((int)ParallelGCThreads, 1); |
1764 _task_queues = new RefToScanQueueSet(n_queues); | |
1765 | |
1766 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1767 assert(n_rem_sets > 0, "Invariant."); | |
1768 | |
1769 HeapRegionRemSetIterator** iter_arr = | |
1770 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1771 for (int i = 0; i < n_queues; i++) { | |
1772 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1773 } | |
1774 _rem_set_iterator = iter_arr; | |
1775 | |
1776 for (int i = 0; i < n_queues; i++) { | |
1777 RefToScanQueue* q = new RefToScanQueue(); | |
1778 q->initialize(); | |
1779 _task_queues->register_queue(i, q); | |
1780 } | |
1781 | |
1782 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
636 | 1783 _gc_alloc_regions[ap] = NULL; |
1784 _gc_alloc_region_counts[ap] = 0; | |
1785 _retained_gc_alloc_regions[ap] = NULL; | |
1786 // by default, we do not retain a GC alloc region for each ap; | |
1787 // we'll override this, when appropriate, below | |
1788 _retain_gc_alloc_region[ap] = false; | |
1789 } | |
1790 | |
1791 // We will try to remember the last half-full tenured region we | |
1792 // allocated to at the end of a collection so that we can re-use it | |
1793 // during the next collection. | |
1794 _retain_gc_alloc_region[GCAllocForTenured] = true; | |
1795 | |
342 | 1796 guarantee(_task_queues != NULL, "task_queues allocation failure."); |
1797 } | |
1798 | |
1799 jint G1CollectedHeap::initialize() { | |
1166 | 1800 CollectedHeap::pre_initialize(); |
342 | 1801 os::enable_vtime(); |
1802 | |
1803 // Necessary to satisfy locking discipline assertions. | |
1804 | |
1805 MutexLocker x(Heap_lock); | |
1806 | |
1807 // While there are no constraints in the GC code that HeapWordSize | |
1808 // be any particular value, there are multiple other areas in the | |
1809 // system which believe this to be true (e.g. oop->object_size in some | |
1810 // cases incorrectly returns the size in wordSize units rather than | |
1811 // HeapWordSize). | |
1812 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1813 | |
1814 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1815 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1816 | |
1817 // Ensure that the sizes are properly aligned. | |
1818 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1819 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1820 | |
1821 _cg1r = new ConcurrentG1Refine(); | |
1822 | |
1823 // Reserve the maximum. | |
1824 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1825 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1826 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1827 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1828 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1829 |
342 | 1830 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1831 HeapRegion::GrainBytes, | |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1832 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1833 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1834 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1835 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1836 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1837 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1838 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1839 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1840 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1841 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1842 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1843 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1844 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1845 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1846 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1847 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1848 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1849 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1850 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1851 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1852 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1853 } |
342 | 1854 |
1855 if (!heap_rs.is_reserved()) { | |
1856 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1857 return JNI_ENOMEM; | |
1858 } | |
1859 | |
1860 // It is important to do this in a way such that concurrent readers can't | |
1861 // temporarily think somethings in the heap. (I've actually seen this | |
1862 // happen in asserts: DLD.) | |
1863 _reserved.set_word_size(0); | |
1864 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1865 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1866 | |
1867 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1868 | |
1869 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1870 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1871 set_barrier_set(rem_set()->bs()); | |
1872 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1873 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1874 } else { | |
1875 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1876 return JNI_ENOMEM; | |
1877 } | |
1878 | |
1879 // Also create a G1 rem set. | |
1861 | 1880 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
1881 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
342 | 1882 } else { |
1861 | 1883 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
1884 return JNI_ENOMEM; | |
342 | 1885 } |
1886 | |
1887 // Carve out the G1 part of the heap. | |
1888 | |
1889 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1890 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1891 g1_rs.size()/HeapWordSize); | |
1892 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1893 | |
1894 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1895 | |
1896 _g1_storage.initialize(g1_rs, 0); | |
1897 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
3766 | 1898 _hrs.initialize((HeapWord*) _g1_reserved.start(), |
1899 (HeapWord*) _g1_reserved.end(), | |
1900 _expansion_regions); | |
342 | 1901 |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1902 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1903 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1904 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1905 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1906 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1907 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1908 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1909 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1910 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1911 |
2152 | 1912 HeapRegionSet::set_unrealistically_long_length(max_regions() + 1); |
1913 | |
342 | 1914 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1915 heap_word_size(init_byte_size)); | |
1916 | |
1917 _g1h = this; | |
1918 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1919 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1920 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1921 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1922 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1923 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1924 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1925 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1926 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1927 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1928 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1929 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1930 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1931 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1932 |
342 | 1933 // Create the ConcurrentMark data structure and thread. |
1934 // (Must do this late, so that "max_regions" is defined.) | |
1935 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1936 _cmThread = _cm->cmThread(); | |
1937 | |
1938 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1939 HeapRegionRemSet::init_heap(max_regions()); | |
1940 | |
677 | 1941 // Now expand into the initial heap size. |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1942 if (!expand(init_byte_size)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1943 vm_exit_during_initialization("Failed to allocate initial heap."); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1944 return JNI_ENOMEM; |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
1945 } |
342 | 1946 |
1947 // Perform any initialization actions delegated to the policy. | |
1948 g1_policy()->init(); | |
1949 | |
1950 g1_policy()->note_start_of_mark_thread(); | |
1951 | |
1952 _refine_cte_cl = | |
1953 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1954 g1_rem_set(), | |
1955 concurrent_g1_refine()); | |
1956 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1957 | |
1958 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1959 SATB_Q_FL_lock, | |
1111 | 1960 G1SATBProcessCompletedThreshold, |
342 | 1961 Shared_SATB_Q_lock); |
794 | 1962 |
1963 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1964 DirtyCardQ_FL_lock, | |
1111 | 1965 concurrent_g1_refine()->yellow_zone(), |
1966 concurrent_g1_refine()->red_zone(), | |
794 | 1967 Shared_DirtyCardQ_lock); |
1968 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1969 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1970 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1971 DirtyCardQ_FL_lock, |
1111 | 1972 -1, // never trigger processing |
1973 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1974 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1975 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1976 } |
1705 | 1977 |
1978 // Initialize the card queue set used to hold cards containing | |
1979 // references into the collection set. | |
1980 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, | |
1981 DirtyCardQ_FL_lock, | |
1982 -1, // never trigger processing | |
1983 -1, // no limit on length | |
1984 Shared_DirtyCardQ_lock, | |
1985 &JavaThread::dirty_card_queue_set()); | |
1986 | |
342 | 1987 // In case we're keeping closure specialization stats, initialize those |
1988 // counts and that mechanism. | |
1989 SpecializationStats::clear(); | |
1990 | |
1991 _gc_alloc_region_list = NULL; | |
1992 | |
1993 // Do later initialization work for concurrent refinement. | |
1994 _cg1r->init(); | |
1995 | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1996 // Here we allocate the dummy full region that is required by the |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1997 // G1AllocRegion class. If we don't pass an address in the reserved |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
1998 // space here, lots of asserts fire. |
3766 | 1999 |
2000 HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */, | |
2001 _g1_reserved.start()); | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2002 // We'll re-use the same region whether the alloc region will |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2003 // require BOT updates or not and, if it doesn't, then a non-young |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2004 // region will complain that it cannot support allocations without |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2005 // BOT updates. So we'll tag the dummy region as young to avoid that. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2006 dummy_region->set_young(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2007 // Make sure it's full. |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2008 dummy_region->set_top(dummy_region->end()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2009 G1AllocRegion::setup(this, dummy_region); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2010 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2011 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2012 |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2013 // Do create of the monitoring and management support so that |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2014 // values in the heap have been properly initialized. |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2015 _g1mm = new G1MonitoringSupport(this, &_g1_storage); |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
2016 |
342 | 2017 return JNI_OK; |
2018 } | |
2019 | |
2020 void G1CollectedHeap::ref_processing_init() { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2021 // Reference processing in G1 currently works as follows: |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2022 // |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2023 // * There is only one reference processor instance that |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2024 // 'spans' the entire heap. It is created by the code |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2025 // below. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2026 // * Reference discovery is not enabled during an incremental |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2027 // pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2028 // * Discoverered refs are not enqueued nor are they processed |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2029 // during an incremental pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2030 // * Reference discovery is enabled at initial marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2031 // * Reference discovery is disabled and the discovered |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2032 // references processed etc during remarking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2033 // * Reference discovery is MT (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2034 // * Reference discovery requires a barrier (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2035 // * Reference processing is currently not MT (see 6608385). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2036 // * A full GC enables (non-MT) reference discovery and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2037 // processes any discovered references. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2038 |
342 | 2039 SharedHeap::ref_processing_init(); |
2040 MemRegion mr = reserved_region(); | |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2041 _ref_processor = |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2042 new ReferenceProcessor(mr, // span |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2043 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2044 (int) ParallelGCThreads, // degree of mt processing |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2045 ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2046 (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2047 false, // Reference discovery is not atomic |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2048 &_is_alive_closure, // is alive closure for efficiency |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2049 true); // Setting next fields of discovered |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2361
diff
changeset
|
2050 // lists requires a barrier. |
342 | 2051 } |
2052 | |
2053 size_t G1CollectedHeap::capacity() const { | |
2054 return _g1_committed.byte_size(); | |
2055 } | |
2056 | |
1705 | 2057 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, |
2058 DirtyCardQueue* into_cset_dcq, | |
2059 bool concurrent, | |
342 | 2060 int worker_i) { |
889 | 2061 // Clean cards in the hot card cache |
1705 | 2062 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); |
889 | 2063 |
342 | 2064 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
2065 int n_completed_buffers = 0; | |
1705 | 2066 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { |
342 | 2067 n_completed_buffers++; |
2068 } | |
2069 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
2070 (double) n_completed_buffers); | |
2071 dcqs.clear_n_completed_buffers(); | |
2072 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
2073 } | |
2074 | |
2075 | |
2076 // Computes the sum of the storage used by the various regions. | |
2077 | |
2078 size_t G1CollectedHeap::used() const { | |
862
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2079 assert(Heap_lock->owner() != NULL, |
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2080 "Should be owned on this thread's behalf."); |
342 | 2081 size_t result = _summary_bytes_used; |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2082 // Read only once in case it is set to NULL concurrently |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2083 HeapRegion* hr = _mutator_alloc_region.get(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2084 if (hr != NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2085 result += hr->used(); |
342 | 2086 return result; |
2087 } | |
2088 | |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2089 size_t G1CollectedHeap::used_unlocked() const { |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2090 size_t result = _summary_bytes_used; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2091 return result; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2092 } |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2093 |
342 | 2094 class SumUsedClosure: public HeapRegionClosure { |
2095 size_t _used; | |
2096 public: | |
2097 SumUsedClosure() : _used(0) {} | |
2098 bool doHeapRegion(HeapRegion* r) { | |
2099 if (!r->continuesHumongous()) { | |
2100 _used += r->used(); | |
2101 } | |
2102 return false; | |
2103 } | |
2104 size_t result() { return _used; } | |
2105 }; | |
2106 | |
2107 size_t G1CollectedHeap::recalculate_used() const { | |
2108 SumUsedClosure blk; | |
3766 | 2109 heap_region_iterate(&blk); |
342 | 2110 return blk.result(); |
2111 } | |
2112 | |
2113 #ifndef PRODUCT | |
2114 class SumUsedRegionsClosure: public HeapRegionClosure { | |
2115 size_t _num; | |
2116 public: | |
677 | 2117 SumUsedRegionsClosure() : _num(0) {} |
342 | 2118 bool doHeapRegion(HeapRegion* r) { |
2119 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
2120 _num += 1; | |
2121 } | |
2122 return false; | |
2123 } | |
2124 size_t result() { return _num; } | |
2125 }; | |
2126 | |
2127 size_t G1CollectedHeap::recalculate_used_regions() const { | |
2128 SumUsedRegionsClosure blk; | |
3766 | 2129 heap_region_iterate(&blk); |
342 | 2130 return blk.result(); |
2131 } | |
2132 #endif // PRODUCT | |
2133 | |
2134 size_t G1CollectedHeap::unsafe_max_alloc() { | |
2152 | 2135 if (free_regions() > 0) return HeapRegion::GrainBytes; |
342 | 2136 // otherwise, is there space in the current allocation region? |
2137 | |
2138 // We need to store the current allocation region in a local variable | |
2139 // here. The problem is that this method doesn't take any locks and | |
2140 // there may be other threads which overwrite the current allocation | |
2141 // region field. attempt_allocation(), for example, sets it to NULL | |
2142 // and this can happen *after* the NULL check here but before the call | |
2143 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
2144 // to be a problem in the optimized build, since the two loads of the | |
2145 // current allocation region field are optimized away. | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2146 HeapRegion* hr = _mutator_alloc_region.get(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2147 if (hr == NULL) { |
342 | 2148 return 0; |
2149 } | |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2150 return hr->free(); |
342 | 2151 } |
2152 | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2153 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2154 return |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2155 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2156 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2157 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2158 |
3285
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2159 #ifndef PRODUCT |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2160 void G1CollectedHeap::allocate_dummy_regions() { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2161 // Let's fill up most of the region |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2162 size_t word_size = HeapRegion::GrainWords - 1024; |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2163 // And as a result the region we'll allocate will be humongous. |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2164 guarantee(isHumongous(word_size), "sanity"); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2165 |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2166 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2167 // Let's use the existing mechanism for the allocation |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2168 HeapWord* dummy_obj = humongous_obj_allocate(word_size); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2169 if (dummy_obj != NULL) { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2170 MemRegion mr(dummy_obj, word_size); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2171 CollectedHeap::fill_with_object(mr); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2172 } else { |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2173 // If we can't allocate once, we probably cannot allocate |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2174 // again. Let's get out of the loop. |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2175 break; |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2176 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2177 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2178 } |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2179 #endif // !PRODUCT |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
2180 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2181 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) { |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2182 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2183 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2184 // We assume that if concurrent == true, then the caller is a |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2185 // concurrent thread that was joined the Suspendible Thread |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2186 // Set. If there's ever a cheap way to check this, we should add an |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2187 // assert here. |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2188 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2189 // We have already incremented _total_full_collections at the start |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2190 // of the GC, so total_full_collections() represents how many full |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2191 // collections have been started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2192 unsigned int full_collections_started = total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2193 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2194 // Given that this method is called at the end of a Full GC or of a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2195 // concurrent cycle, and those can be nested (i.e., a Full GC can |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2196 // interrupt a concurrent cycle), the number of full collections |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2197 // completed should be either one (in the case where there was no |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2198 // nesting) or two (when a Full GC interrupted a concurrent cycle) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2199 // behind the number of full collections started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2200 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2201 // This is the case for the inner caller, i.e. a Full GC. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2202 assert(concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2203 (full_collections_started == _full_collections_completed + 1) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2204 (full_collections_started == _full_collections_completed + 2), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2205 err_msg("for inner caller (Full GC): full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2206 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2207 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2208 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2209 // This is the case for the outer caller, i.e. the concurrent cycle. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2210 assert(!concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2211 (full_collections_started == _full_collections_completed + 1), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2212 err_msg("for outer caller (concurrent cycle): " |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2213 "full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2214 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2215 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2216 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2217 _full_collections_completed += 1; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2218 |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2219 // We need to clear the "in_progress" flag in the CM thread before |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2220 // we wake up any waiters (especially when ExplicitInvokesConcurrent |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2221 // is set) so that if a waiter requests another System.gc() it doesn't |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2222 // incorrectly see that a marking cyle is still in progress. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2223 if (concurrent) { |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2224 _cmThread->clear_in_progress(); |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2225 } |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2226 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2227 // This notify_all() will ensure that a thread that called |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2228 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2229 // and it's waiting for a full GC to finish will be woken up. It is |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2230 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2231 FullGCCount_lock->notify_all(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2232 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2233 |
342 | 2234 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
2152 | 2235 assert_at_safepoint(true /* should_be_vm_thread */); |
342 | 2236 GCCauseSetter gcs(this, cause); |
2237 switch (cause) { | |
2238 case GCCause::_heap_inspection: | |
2239 case GCCause::_heap_dump: { | |
2240 HandleMark hm; | |
2241 do_full_collection(false); // don't clear all soft refs | |
2242 break; | |
2243 } | |
2244 default: // XXX FIX ME | |
2245 ShouldNotReachHere(); // Unexpected use of this function | |
2246 } | |
2247 } | |
2248 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2249 void G1CollectedHeap::collect(GCCause::Cause cause) { |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2250 // The caller doesn't have the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2251 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2252 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2253 unsigned int gc_count_before; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2254 unsigned int full_gc_count_before; |
342 | 2255 { |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2256 MutexLocker ml(Heap_lock); |
1973 | 2257 |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2258 // Read the GC count while holding the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2259 gc_count_before = SharedHeap::heap()->total_collections(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2260 full_gc_count_before = SharedHeap::heap()->total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2261 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2262 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2263 if (should_do_concurrent_full_gc(cause)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2264 // Schedule an initial-mark evacuation pause that will start a |
1973 | 2265 // concurrent cycle. We're setting word_size to 0 which means that |
2266 // we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2267 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2268 0, /* word_size */ |
2269 true, /* should_initiate_conc_mark */ | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2270 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2271 cause); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2272 VMThread::execute(&op); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2273 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2274 if (cause == GCCause::_gc_locker |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2275 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2276 |
1973 | 2277 // Schedule a standard evacuation pause. We're setting word_size |
2278 // to 0 which means that we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2279 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2280 0, /* word_size */ |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2281 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2282 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2283 cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2284 VMThread::execute(&op); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2285 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2286 // Schedule a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2287 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2288 VMThread::execute(&op); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2289 } |
342 | 2290 } |
2291 } | |
2292 | |
2293 bool G1CollectedHeap::is_in(const void* p) const { | |
3766 | 2294 HeapRegion* hr = _hrs.addr_to_region((HeapWord*) p); |
2295 if (hr != NULL) { | |
342 | 2296 return hr->is_in(p); |
2297 } else { | |
2298 return _perm_gen->as_gen()->is_in(p); | |
2299 } | |
2300 } | |
2301 | |
2302 // Iteration functions. | |
2303 | |
2304 // Iterates an OopClosure over all ref-containing fields of objects | |
2305 // within a HeapRegion. | |
2306 | |
2307 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
2308 MemRegion _mr; | |
2309 OopClosure* _cl; | |
2310 public: | |
2311 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
2312 : _mr(mr), _cl(cl) {} | |
2313 bool doHeapRegion(HeapRegion* r) { | |
2314 if (! r->continuesHumongous()) { | |
2315 r->oop_iterate(_cl); | |
2316 } | |
2317 return false; | |
2318 } | |
2319 }; | |
2320 | |
678 | 2321 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 2322 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
3766 | 2323 heap_region_iterate(&blk); |
678 | 2324 if (do_perm) { |
2325 perm_gen()->oop_iterate(cl); | |
2326 } | |
342 | 2327 } |
2328 | |
678 | 2329 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 2330 IterateOopClosureRegionClosure blk(mr, cl); |
3766 | 2331 heap_region_iterate(&blk); |
678 | 2332 if (do_perm) { |
2333 perm_gen()->oop_iterate(cl); | |
2334 } | |
342 | 2335 } |
2336 | |
2337 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
2338 | |
2339 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
2340 ObjectClosure* _cl; | |
2341 public: | |
2342 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
2343 bool doHeapRegion(HeapRegion* r) { | |
2344 if (! r->continuesHumongous()) { | |
2345 r->object_iterate(_cl); | |
2346 } | |
2347 return false; | |
2348 } | |
2349 }; | |
2350 | |
678 | 2351 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 2352 IterateObjectClosureRegionClosure blk(cl); |
3766 | 2353 heap_region_iterate(&blk); |
678 | 2354 if (do_perm) { |
2355 perm_gen()->object_iterate(cl); | |
2356 } | |
342 | 2357 } |
2358 | |
2359 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
2360 // FIXME: is this right? | |
2361 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
2362 } | |
2363 | |
2364 // Calls a SpaceClosure on a HeapRegion. | |
2365 | |
2366 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
2367 SpaceClosure* _cl; | |
2368 public: | |
2369 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
2370 bool doHeapRegion(HeapRegion* r) { | |
2371 _cl->do_space(r); | |
2372 return false; | |
2373 } | |
2374 }; | |
2375 | |
2376 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
2377 SpaceClosureRegionClosure blk(cl); | |
3766 | 2378 heap_region_iterate(&blk); |
342 | 2379 } |
2380 | |
3766 | 2381 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const { |
2382 _hrs.iterate(cl); | |
342 | 2383 } |
2384 | |
2385 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
3766 | 2386 HeapRegionClosure* cl) const { |
2387 _hrs.iterate_from(r, cl); | |
342 | 2388 } |
2389 | |
2390 void | |
2391 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
2392 int worker, | |
2393 jint claim_value) { | |
355 | 2394 const size_t regions = n_regions(); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2395 const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1); |
355 | 2396 // try to spread out the starting points of the workers |
2397 const size_t start_index = regions / worker_num * (size_t) worker; | |
2398 | |
2399 // each worker will actually look at all regions | |
2400 for (size_t count = 0; count < regions; ++count) { | |
2401 const size_t index = (start_index + count) % regions; | |
2402 assert(0 <= index && index < regions, "sanity"); | |
2403 HeapRegion* r = region_at(index); | |
2404 // we'll ignore "continues humongous" regions (we'll process them | |
2405 // when we come across their corresponding "start humongous" | |
2406 // region) and regions already claimed | |
2407 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
2408 continue; | |
2409 } | |
2410 // OK, try to claim it | |
342 | 2411 if (r->claimHeapRegion(claim_value)) { |
355 | 2412 // success! |
2413 assert(!r->continuesHumongous(), "sanity"); | |
2414 if (r->startsHumongous()) { | |
2415 // If the region is "starts humongous" we'll iterate over its | |
2416 // "continues humongous" first; in fact we'll do them | |
2417 // first. The order is important. In on case, calling the | |
2418 // closure on the "starts humongous" region might de-allocate | |
2419 // and clear all its "continues humongous" regions and, as a | |
2420 // result, we might end up processing them twice. So, we'll do | |
2421 // them first (notice: most closures will ignore them anyway) and | |
2422 // then we'll do the "starts humongous" region. | |
2423 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
2424 HeapRegion* chr = region_at(ch_index); | |
2425 | |
2426 // if the region has already been claimed or it's not | |
2427 // "continues humongous" we're done | |
2428 if (chr->claim_value() == claim_value || | |
2429 !chr->continuesHumongous()) { | |
2430 break; | |
2431 } | |
2432 | |
2433 // Noone should have claimed it directly. We can given | |
2434 // that we claimed its "starts humongous" region. | |
2435 assert(chr->claim_value() != claim_value, "sanity"); | |
2436 assert(chr->humongous_start_region() == r, "sanity"); | |
2437 | |
2438 if (chr->claimHeapRegion(claim_value)) { | |
2439 // we should always be able to claim it; noone else should | |
2440 // be trying to claim this region | |
2441 | |
2442 bool res2 = cl->doHeapRegion(chr); | |
2443 assert(!res2, "Should not abort"); | |
2444 | |
2445 // Right now, this holds (i.e., no closure that actually | |
2446 // does something with "continues humongous" regions | |
2447 // clears them). We might have to weaken it in the future, | |
2448 // but let's leave these two asserts here for extra safety. | |
2449 assert(chr->continuesHumongous(), "should still be the case"); | |
2450 assert(chr->humongous_start_region() == r, "sanity"); | |
2451 } else { | |
2452 guarantee(false, "we should not reach here"); | |
2453 } | |
2454 } | |
2455 } | |
2456 | |
2457 assert(!r->continuesHumongous(), "sanity"); | |
2458 bool res = cl->doHeapRegion(r); | |
2459 assert(!res, "Should not abort"); | |
2460 } | |
2461 } | |
2462 } | |
2463 | |
390 | 2464 class ResetClaimValuesClosure: public HeapRegionClosure { |
2465 public: | |
2466 bool doHeapRegion(HeapRegion* r) { | |
2467 r->set_claim_value(HeapRegion::InitialClaimValue); | |
2468 return false; | |
2469 } | |
2470 }; | |
2471 | |
2472 void | |
2473 G1CollectedHeap::reset_heap_region_claim_values() { | |
2474 ResetClaimValuesClosure blk; | |
2475 heap_region_iterate(&blk); | |
2476 } | |
2477 | |
355 | 2478 #ifdef ASSERT |
2479 // This checks whether all regions in the heap have the correct claim | |
2480 // value. I also piggy-backed on this a check to ensure that the | |
2481 // humongous_start_region() information on "continues humongous" | |
2482 // regions is correct. | |
2483 | |
2484 class CheckClaimValuesClosure : public HeapRegionClosure { | |
2485 private: | |
2486 jint _claim_value; | |
2487 size_t _failures; | |
2488 HeapRegion* _sh_region; | |
2489 public: | |
2490 CheckClaimValuesClosure(jint claim_value) : | |
2491 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } | |
2492 bool doHeapRegion(HeapRegion* r) { | |
2493 if (r->claim_value() != _claim_value) { | |
2494 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
2495 "claim value = %d, should be %d", | |
2496 r->bottom(), r->end(), r->claim_value(), | |
2497 _claim_value); | |
2498 ++_failures; | |
2499 } | |
2500 if (!r->isHumongous()) { | |
2501 _sh_region = NULL; | |
2502 } else if (r->startsHumongous()) { | |
2503 _sh_region = r; | |
2504 } else if (r->continuesHumongous()) { | |
2505 if (r->humongous_start_region() != _sh_region) { | |
2506 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
2507 "HS = "PTR_FORMAT", should be "PTR_FORMAT, | |
2508 r->bottom(), r->end(), | |
2509 r->humongous_start_region(), | |
2510 _sh_region); | |
2511 ++_failures; | |
342 | 2512 } |
2513 } | |
355 | 2514 return false; |
2515 } | |
2516 size_t failures() { | |
2517 return _failures; | |
2518 } | |
2519 }; | |
2520 | |
2521 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
2522 CheckClaimValuesClosure cl(claim_value); | |
2523 heap_region_iterate(&cl); | |
2524 return cl.failures() == 0; | |
2525 } | |
2526 #endif // ASSERT | |
342 | 2527 |
2528 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2529 HeapRegion* r = g1_policy()->collection_set(); | |
2530 while (r != NULL) { | |
2531 HeapRegion* next = r->next_in_collection_set(); | |
2532 if (cl->doHeapRegion(r)) { | |
2533 cl->incomplete(); | |
2534 return; | |
2535 } | |
2536 r = next; | |
2537 } | |
2538 } | |
2539 | |
2540 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
2541 HeapRegionClosure *cl) { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2542 if (r == NULL) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2543 // The CSet is empty so there's nothing to do. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2544 return; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2545 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2546 |
342 | 2547 assert(r->in_collection_set(), |
2548 "Start region must be a member of the collection set."); | |
2549 HeapRegion* cur = r; | |
2550 while (cur != NULL) { | |
2551 HeapRegion* next = cur->next_in_collection_set(); | |
2552 if (cl->doHeapRegion(cur) && false) { | |
2553 cl->incomplete(); | |
2554 return; | |
2555 } | |
2556 cur = next; | |
2557 } | |
2558 cur = g1_policy()->collection_set(); | |
2559 while (cur != r) { | |
2560 HeapRegion* next = cur->next_in_collection_set(); | |
2561 if (cl->doHeapRegion(cur) && false) { | |
2562 cl->incomplete(); | |
2563 return; | |
2564 } | |
2565 cur = next; | |
2566 } | |
2567 } | |
2568 | |
2569 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
3766 | 2570 return n_regions() > 0 ? region_at(0) : NULL; |
342 | 2571 } |
2572 | |
2573 | |
2574 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2575 Space* res = heap_region_containing(addr); | |
2576 if (res == NULL) | |
2577 res = perm_gen()->space_containing(addr); | |
2578 return res; | |
2579 } | |
2580 | |
2581 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2582 Space* sp = space_containing(addr); | |
2583 if (sp != NULL) { | |
2584 return sp->block_start(addr); | |
2585 } | |
2586 return NULL; | |
2587 } | |
2588 | |
2589 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
2590 Space* sp = space_containing(addr); | |
2591 assert(sp != NULL, "block_size of address outside of heap"); | |
2592 return sp->block_size(addr); | |
2593 } | |
2594 | |
2595 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2596 Space* sp = space_containing(addr); | |
2597 return sp->block_is_obj(addr); | |
2598 } | |
2599 | |
2600 bool G1CollectedHeap::supports_tlab_allocation() const { | |
2601 return true; | |
2602 } | |
2603 | |
2604 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
2605 return HeapRegion::GrainBytes; | |
2606 } | |
2607 | |
2608 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
2609 // Return the remaining space in the cur alloc region, but not less than | |
2610 // the min TLAB size. | |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2611 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2612 // Also, this value can be at most the humongous object threshold, |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2613 // since we can't allow tlabs to grow big enough to accomodate |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2614 // humongous objects. |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2615 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2616 HeapRegion* hr = _mutator_alloc_region.get(); |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2617 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2618 if (hr == NULL) { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2619 return max_tlab_size; |
342 | 2620 } else { |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
2621 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size); |
342 | 2622 } |
2623 } | |
2624 | |
2625 size_t G1CollectedHeap::large_typearray_limit() { | |
2626 // FIXME | |
2627 return HeapRegion::GrainBytes/HeapWordSize; | |
2628 } | |
2629 | |
2630 size_t G1CollectedHeap::max_capacity() const { | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
2631 return _g1_reserved.byte_size(); |
342 | 2632 } |
2633 | |
2634 jlong G1CollectedHeap::millis_since_last_gc() { | |
2635 // assert(false, "NYI"); | |
2636 return 0; | |
2637 } | |
2638 | |
2639 void G1CollectedHeap::prepare_for_verify() { | |
2640 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2641 ensure_parsability(false); | |
2642 } | |
2643 g1_rem_set()->prepare_for_verify(); | |
2644 } | |
2645 | |
2646 class VerifyLivenessOopClosure: public OopClosure { | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2647 G1CollectedHeap* _g1h; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2648 VerifyOption _vo; |
342 | 2649 public: |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2650 VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo): |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2651 _g1h(g1h), _vo(vo) |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2652 { } |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2653 void do_oop(narrowOop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2654 void do_oop( oop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2655 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2656 template <class T> void do_oop_work(T *p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2657 oop obj = oopDesc::load_decode_heap_oop(p); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2658 guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2659 "Dead object referenced by a not dead object"); |
342 | 2660 } |
2661 }; | |
2662 | |
2663 class VerifyObjsInRegionClosure: public ObjectClosure { | |
811 | 2664 private: |
342 | 2665 G1CollectedHeap* _g1h; |
2666 size_t _live_bytes; | |
2667 HeapRegion *_hr; | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2668 VerifyOption _vo; |
342 | 2669 public: |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2670 // _vo == UsePrevMarking -> use "prev" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2671 // _vo == UseNextMarking -> use "next" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2672 // _vo == UseMarkWord -> use mark word from object header. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2673 VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo) |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2674 : _live_bytes(0), _hr(hr), _vo(vo) { |
342 | 2675 _g1h = G1CollectedHeap::heap(); |
2676 } | |
2677 void do_object(oop o) { | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2678 VerifyLivenessOopClosure isLive(_g1h, _vo); |
342 | 2679 assert(o != NULL, "Huh?"); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2680 if (!_g1h->is_obj_dead_cond(o, _vo)) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2681 // If the object is alive according to the mark word, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2682 // then verify that the marking information agrees. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2683 // Note we can't verify the contra-positive of the |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2684 // above: if the object is dead (according to the mark |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2685 // word), it may not be marked, or may have been marked |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2686 // but has since became dead, or may have been allocated |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2687 // since the last marking. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2688 if (_vo == VerifyOption_G1UseMarkWord) { |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2689 guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch"); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2690 } |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2691 |
342 | 2692 o->oop_iterate(&isLive); |
1389
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2693 if (!_hr->obj_allocated_since_prev_marking(o)) { |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2694 size_t obj_size = o->size(); // Make sure we don't overflow |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2695 _live_bytes += (obj_size * HeapWordSize); |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2696 } |
342 | 2697 } |
2698 } | |
2699 size_t live_bytes() { return _live_bytes; } | |
2700 }; | |
2701 | |
2702 class PrintObjsInRegionClosure : public ObjectClosure { | |
2703 HeapRegion *_hr; | |
2704 G1CollectedHeap *_g1; | |
2705 public: | |
2706 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2707 _g1 = G1CollectedHeap::heap(); | |
2708 }; | |
2709 | |
2710 void do_object(oop o) { | |
2711 if (o != NULL) { | |
2712 HeapWord *start = (HeapWord *) o; | |
2713 size_t word_sz = o->size(); | |
2714 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2715 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2716 (void*) o, word_sz, | |
2717 _g1->isMarkedPrev(o), | |
2718 _g1->isMarkedNext(o), | |
2719 _hr->obj_allocated_since_prev_marking(o)); | |
2720 HeapWord *end = start + word_sz; | |
2721 HeapWord *cur; | |
2722 int *val; | |
2723 for (cur = start; cur < end; cur++) { | |
2724 val = (int *) cur; | |
2725 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2726 } | |
2727 } | |
2728 } | |
2729 }; | |
2730 | |
2731 class VerifyRegionClosure: public HeapRegionClosure { | |
811 | 2732 private: |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2733 bool _allow_dirty; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2734 bool _par; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2735 VerifyOption _vo; |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2736 bool _failures; |
811 | 2737 public: |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2738 // _vo == UsePrevMarking -> use "prev" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2739 // _vo == UseNextMarking -> use "next" marking information, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2740 // _vo == UseMarkWord -> use mark word from object header. |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2741 VerifyRegionClosure(bool allow_dirty, bool par, VerifyOption vo) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2742 : _allow_dirty(allow_dirty), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2743 _par(par), |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
2744 _vo(vo), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2745 _failures(false) {} |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2746 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2747 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2748 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2749 } |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2750 |
  // Verify a single heap region. Skips "continues humongous" regions
  // (they are verified as part of their "starts humongous" region).
  // Records any failure in _failures rather than aborting immediately.
  bool doHeapRegion(HeapRegion* r) {
    // In the serial case regions must still carry the initial claim value;
    // in the parallel case the chunked iterator manages claims.
    guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
              "Should be unclaimed at verify points.");
    if (!r->continuesHumongous()) {
      bool failures = false;
      r->verify(_allow_dirty, _vo, &failures);
      if (failures) {
        _failures = true;
      } else {
        // The region itself verified; cross-check that the cached
        // max_live_bytes bound is not smaller than the live bytes we
        // compute by walking the objects in the region.
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
        r->object_iterate(&not_dead_yet_cl);
        if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
          gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
                                 "max_live_bytes "SIZE_FORMAT" "
                                 "< calculated "SIZE_FORMAT,
                                 r->bottom(), r->end(),
                                 r->max_live_bytes(),
                                 not_dead_yet_cl.live_bytes());
          _failures = true;
        }
      }
    }
    // NOTE: returning false continues the iteration over all regions;
    // failures are accumulated in _failures and reported by the caller.
    return false; // stop the region iteration if we hit a failure
  }
2775 }; | |
2776 | |
// Closure applied to every root oop during heap verification. Reports
// (and records) a failure for any root that points to an object that is
// dead according to the selected verification option.
class VerifyRootsClosure: public OopsInGenClosure {
private:
  G1CollectedHeap* _g1h;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyRootsClosure(VerifyOption vo) :
    _g1h(G1CollectedHeap::heap()),
    _vo(vo),
    _failures(false) { }

  // True if any root was found pointing to a dead object.
  bool failures() { return _failures; }

  // Shared implementation for both narrow and full-width oops.
  template <class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if (_g1h->is_obj_dead_cond(obj, _vo)) {
        gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
                              "points to dead obj "PTR_FORMAT, p, (void*) obj);
        if (_vo == VerifyOption_G1UseMarkWord) {
          // When liveness comes from the mark word, dump it to aid debugging.
          gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
        }
        obj->print_on(gclog_or_tty);
        _failures = true;
      }
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};
2812 | |
// This is the task used for parallel heap verification.
// Each GC worker verifies a disjoint chunk of regions, claiming them
// with HeapRegion::ParVerifyClaimValue; failures are accumulated
// per-task and queried by the caller via failures().
class G1ParVerifyTask: public AbstractGangTask {
private:
  G1CollectedHeap* _g1h;
  bool             _allow_dirty;
  VerifyOption     _vo;
  bool             _failures;

public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, VerifyOption vo) :
    AbstractGangTask("Parallel verify task"),
    _g1h(g1h),
    _allow_dirty(allow_dirty),
    _vo(vo),
    _failures(false) { }

  // True if any worker's region closure reported a verification failure.
  bool failures() {
    return _failures;
  }

  void work(int worker_i) {
    HandleMark hm;
    // "true" == parallel: the region closure skips the serial claim check.
    VerifyRegionClosure blk(_allow_dirty, true, _vo);
    _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
                                          HeapRegion::ParVerifyClaimValue);
    if (blk.failures()) {
      _failures = true;
    }
  }
};
2847 | |
// Convenience overload: verify using the "prev" marking information,
// which is the default for verification outside of a Full GC.
void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
  verify(allow_dirty, silent, VerifyOption_G1UsePrevMarking);
}
2851 | |
// Full heap verification: roots, perm-gen refs, region sets, heap
// regions (serially or in parallel), and the remembered set. The
// VerifyOption selects which liveness information to use; see the
// comments on VerifyRootsClosure. Guarantees (aborts) on any failure.
// Only runs at a safepoint (or when TLABs are disabled).
void G1CollectedHeap::verify(bool allow_dirty,
                             bool silent,
                             VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
    VerifyRootsClosure rootsCl(vo);
    CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);

    // We apply the relevant closures to all the oops in the
    // system dictionary, the string table and the code cache.
    const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

    process_strong_roots(true,  // activate StrongRootsScope
                         true,  // we set "collecting perm gen" to true,
                               // so we don't reset the dirty cards in the perm gen.
                         SharedHeap::ScanningOption(so),  // roots scanning options
                         &rootsCl,
                         &blobsCl,
                         &rootsCl);

    // If we're verifying after the marking phase of a Full GC then we can't
    // treat the perm gen as roots into the G1 heap. Some of the objects in
    // the perm gen may be dead and hence not marked. If one of these dead
    // objects is considered to be a root then we may end up with a false
    // "Root location <x> points to dead obj <y>" failure.
    if (vo != VerifyOption_G1UseMarkWord) {
      // Since we used "collecting_perm_gen" == true above, we will not have
      // checked the refs from perm into the G1-collected heap. We check those
      // references explicitly below. Whether the relevant cards are dirty
      // is checked further below in the rem set verification.
      if (!silent) { gclog_or_tty->print("Permgen roots "); }
      perm_gen()->oop_iterate(&rootsCl);
    }
    bool failures = rootsCl.failures();

    if (vo != VerifyOption_G1UseMarkWord) {
      // If we're verifying during a full GC then the region sets
      // will have been torn down at the start of the GC. Therefore
      // verifying the region sets will fail. So we only verify
      // the region sets when not in a full GC.
      if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
      verify_region_sets();
    }

    if (!silent) { gclog_or_tty->print("HeapRegions "); }
    if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
      assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
             "sanity check");

      G1ParVerifyTask task(this, allow_dirty, vo);
      int n_workers = workers()->total_workers();
      set_par_threads(n_workers);
      workers()->run_task(&task);
      set_par_threads(0);
      if (task.failures()) {
        failures = true;
      }

      // The parallel iteration leaves regions with the ParVerify claim
      // value; check that, then restore the initial claim value.
      assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
             "sanity check");

      reset_heap_region_claim_values();

      assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
             "sanity check");
    } else {
      // Serial verification: "false" == not parallel.
      VerifyRegionClosure blk(allow_dirty, false, vo);
      heap_region_iterate(&blk);
      if (blk.failures()) {
        failures = true;
      }
    }
    if (!silent) gclog_or_tty->print("RemSet ");
    rem_set()->verify();

    if (failures) {
      // Dump the heap before aborting to aid post-mortem debugging.
      gclog_or_tty->print_cr("Heap:");
      print_on(gclog_or_tty, true /* extended */);
      gclog_or_tty->print_cr("");
#ifndef PRODUCT
      if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
        concurrent_mark()->print_reachable("at-verification-failure",
                                           vo, false /* all */);
      }
#endif
      gclog_or_tty->flush();
    }
    guarantee(!failures, "there should not have been any failures");
  } else {
    if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
  }
}
2944 | |
2945 class PrintRegionClosure: public HeapRegionClosure { | |
2946 outputStream* _st; | |
2947 public: | |
2948 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2949 bool doHeapRegion(HeapRegion* r) { | |
2950 r->print_on(_st); | |
2951 return false; | |
2952 } | |
2953 }; | |
2954 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
// Print a heap summary to the default tty stream.
void G1CollectedHeap::print() const { print_on(tty); }
342 | 2956 |
// Print a heap summary; whether the per-region dump is included is
// controlled by the PrintHeapAtGCExtended flag.
void G1CollectedHeap::print_on(outputStream* st) const {
  print_on(st, PrintHeapAtGCExtended);
}
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2960 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2961 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2962 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2963 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2964 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2965 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2966 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2967 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2968 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2969 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2970 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2971 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2972 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2973 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2974 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2975 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2976 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2977 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2978 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2979 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2980 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2981 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2982 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2983 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2984 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2985 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
// Dump every heap region to the given stream (one line per region).
void G1CollectedHeap::print_on_extended(outputStream* st) const {
  PrintRegionClosure blk(st);
  heap_region_iterate(&blk);
}
2990 | |
// Print all GC-related threads: the parallel worker gang (if enabled),
// the concurrent mark thread, CM workers, and concurrent refinement
// workers.
void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    workers()->print_worker_threads_on(st);
  }
  _cmThread->print_on(st);
  st->cr();
  _cm->print_worker_threads_on(st);
  _cg1r->print_worker_threads_on(st);
  st->cr();
}
3001 | |
// Apply the given closure to every GC thread: parallel workers (if
// enabled), the concurrent mark thread, and the refinement threads.
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    workers()->threads_do(tc);
  }
  tc->do_thread(_cmThread);
  _cg1r->threads_do(tc);
}
3009 | |
// Print accumulated GC statistics, gated by the relevant tracing flags.
void G1CollectedHeap::print_tracing_info() const {
  // We'll overload this to mean "trace GC pause statistics."
  if (TraceGen0Time || TraceGen1Time) {
    // The "G1CollectorPolicy" is keeping track of these stats, so delegate
    // to that.
    g1_policy()->print_tracing_info();
  }
  if (G1SummarizeRSetStats) {
    g1_rem_set()->print_summary_info();
  }
  if (G1SummarizeConcMark) {
    concurrent_mark()->print_summary_info();
  }
  g1_policy()->print_yg_surv_rate_info();
  SpecializationStats::print();
}
3026 | |
// Accessor for the singleton G1 heap; asserts the VM is actually
// running with a G1 heap.
G1CollectedHeap* G1CollectedHeap::heap() {
  assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
         "not a garbage-first heap");
  return _g1h;
}
3032 | |
// Work done at the start of every GC, regardless of kind (the "full"
// argument is ignored here).
void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  // always_do_update_barrier = false;
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  // Call allocation profiler
  AllocationProfiler::iterate_since_last_gc();
  // Fill TLAB's and such so the heap is parsable for iteration.
  ensure_parsability(true);
}
3041 | |
// Work done at the end of every GC (the "full" argument is ignored).
void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  // FIXME: what is this about?
  // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  // is set.
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                    "derived pointer present"));
  // always_do_update_barrier = true;
}
3050 | |
// Schedule an incremental collection pause via a VM operation, on
// behalf of an allocation request of "word_size" words. Returns the
// allocation result (possibly NULL) and sets *succeeded to whether the
// pause actually ran (both its prologue and the pause itself).
HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                               unsigned int gc_count_before,
                                               bool* succeeded) {
  assert_heap_not_locked_and_not_at_safepoint();
  g1_policy()->record_stop_world_start();
  VM_G1IncCollectionPause op(gc_count_before,
                             word_size,
                             false, /* should_initiate_conc_mark */
                             g1_policy()->max_pause_time_ms(),
                             GCCause::_g1_inc_collection_pause);
  VMThread::execute(&op);

  HeapWord* result = op.result();
  bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
  assert(result == NULL || ret_succeeded,
         "the result should be NULL if the VM did not succeed");
  *succeeded = ret_succeeded;

  assert_heap_not_locked();
  return result;
}
3072 | |
3073 void | |
3074 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3075 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3076 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3077 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3078 CGC_lock->notify(); |
342 | 3079 } |
3080 } | |
3081 | |
// Run a full marking cycle synchronously on the calling thread:
// initial-mark checkpoint, marking from roots, and the final checkpoint.
void G1CollectedHeap::do_sync_mark() {
  _cm->checkpointRootsInitial();
  _cm->markFromRoots();
  _cm->checkpointRootsFinal(false);
}
3087 | |
3088 // <NEW PREDICTION> | |
3089 | |
// Delegate to the collector policy's pause-time prediction for a region.
double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
                                                       bool young) {
  return _g1_policy->predict_region_elapsed_time_ms(hr, young);
}
3094 | |
// Delegate to the collector policy's "region too expensive" check.
void G1CollectedHeap::check_if_region_is_too_expensive(double
                                                       predicted_time_ms) {
  _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
}
3099 | |
3100 size_t G1CollectedHeap::pending_card_num() { | |
3101 size_t extra_cards = 0; | |
3102 JavaThread *curr = Threads::first(); | |
3103 while (curr != NULL) { | |
3104 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
3105 extra_cards += dcq.size(); | |
3106 curr = curr->next(); | |
3107 } | |
3108 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3109 size_t buffer_size = dcqs.buffer_size(); | |
3110 size_t buffer_num = dcqs.completed_buffers_num(); | |
3111 return buffer_size * buffer_num + extra_cards; | |
3112 } | |
3113 | |
3114 size_t G1CollectedHeap::max_pending_card_num() { | |
3115 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3116 size_t buffer_size = dcqs.buffer_size(); | |
3117 size_t buffer_num = dcqs.completed_buffers_num(); | |
3118 int thread_num = Threads::number_of_threads(); | |
3119 return (buffer_num + thread_num) * buffer_size; | |
3120 } | |
3121 | |
// Number of cards scanned by the remembered-set code in the last pause.
size_t G1CollectedHeap::cards_scanned() {
  return g1_rem_set()->cardsScanned();
}
3125 | |
3126 void | |
3127 G1CollectedHeap::setup_surviving_young_words() { | |
3128 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
3129 size_t array_length = g1_policy()->young_cset_length(); | |
3130 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3131 if (_surviving_young_words == NULL) { | |
3132 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
3133 "Not enough space for young surv words summary."); | |
3134 } | |
3135 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3136 #ifdef ASSERT |
342 | 3137 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3138 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3139 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3140 #endif // !ASSERT |
342 | 3141 } |
3142 | |
3143 void | |
3144 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
3145 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3146 size_t array_length = g1_policy()->young_cset_length(); | |
3147 for (size_t i = 0; i < array_length; ++i) | |
3148 _surviving_young_words[i] += surv_young_words[i]; | |
3149 } | |
3150 | |
3151 void | |
3152 G1CollectedHeap::cleanup_surviving_young_words() { | |
3153 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
3154 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
3155 _surviving_young_words = NULL; | |
3156 } | |
3157 | |
3158 // </NEW PREDICTION> | |
3159 | |
// Region closure that resets each region's remembered-set iteration
// claim so that parallel RSet scanning can start fresh.
struct PrepareForRSScanningClosure : public HeapRegionClosure {
  bool doHeapRegion(HeapRegion *r) {
    r->rem_set()->set_iter_claimed(0);
    return false;  // visit every region
  }
};
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3166 |
1709 | 3167 #if TASKQUEUE_STATS |
3168 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { | |
3169 st->print_raw_cr("GC Task Stats"); | |
3170 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); | |
3171 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); | |
3172 } | |
3173 | |
3174 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { | |
3175 print_taskqueue_stats_hdr(st); | |
3176 | |
3177 TaskQueueStats totals; | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3178 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3179 for (int i = 0; i < n; ++i) { |
3180 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); | |
3181 totals += task_queue(i)->stats; | |
3182 } | |
3183 st->print_raw("tot "); totals.print(st); st->cr(); | |
3184 | |
3185 DEBUG_ONLY(totals.verify()); | |
3186 } | |
3187 | |
3188 void G1CollectedHeap::reset_taskqueue_stats() { | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3189 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3190 for (int i = 0; i < n; ++i) { |
3191 task_queue(i)->stats.reset(); | |
3192 } | |
3193 } | |
3194 #endif // TASKQUEUE_STATS | |
3195 | |
1973 | 3196 bool |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3197 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
2152 | 3198 assert_at_safepoint(true /* should_be_vm_thread */); |
3199 guarantee(!is_gc_active(), "collection is not reentrant"); | |
3200 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3201 if (GC_locker::check_active_before_gc()) { |
1973 | 3202 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3203 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3204 |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2039
diff
changeset
|
3205 SvcGCMarker sgcm(SvcGCMarker::MINOR); |
2039
7c5250dbd584
6896624: G1: hotspot:::gc and hotspot:::mem-pool-gc probes are not fired
tonyp
parents:
2038
diff
changeset
|
3206 ResourceMark rm; |
7c5250dbd584
6896624: G1: hotspot:::gc and hotspot:::mem-pool-gc probes are not fired
tonyp
parents:
2038
diff
changeset
|
3207 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3208 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3209 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3210 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3211 |
2152 | 3212 verify_region_sets_optional(); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3213 verify_dirty_young_regions(); |
2152 | 3214 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3215 { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3216 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3217 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3218 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3219 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3220 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3221 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3222 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3223 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3224 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3225 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3226 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3227 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3228 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3229 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3230 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3231 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3232 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3233 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3234 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3235 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3236 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3237 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3238 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3239 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3240 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3241 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3242 |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
3243 TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); |
3356
78542e2b5e35
7036199: Adding a notification to the implementation of GarbageCollectorMXBeans
fparain
parents:
3323
diff
changeset
|
3244 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3245 |
2361 | 3246 // If the secondary_free_list is not empty, append it to the |
3247 // free_list. No need to wait for the cleanup operation to finish; | |
3248 // the region allocation code will check the secondary_free_list | |
3249 // and wait if necessary. If the G1StressConcRegionFreeing flag is | |
3250 // set, skip this step so that the region allocation code has to | |
3251 // get entries from the secondary_free_list. | |
2152 | 3252 if (!G1StressConcRegionFreeing) { |
2361 | 3253 append_secondary_free_list_if_not_empty_with_lock(); |
2152 | 3254 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3255 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3256 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3257 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3258 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3259 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3260 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3261 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3262 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3263 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3264 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3265 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3266 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3267 increment_total_collections(false /* full gc */); |
342 | 3268 |
3269 #if G1_REM_SET_LOGGING | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3270 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3271 print(); |
342 | 3272 #endif |
3273 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3274 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3275 HandleMark hm; // Discard invalid handles created during verification |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3276 gclog_or_tty->print(" VerifyBeforeGC:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3277 prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3278 Universe::verify(/* allow dirty */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3279 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3280 /* option */ VerifyOption_G1UsePrevMarking); |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3281 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3282 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3283 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3284 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3285 |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3286 // Please see comment in G1CollectedHeap::ref_processing_init() |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3287 // to see how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3288 // |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3289 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3290 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3291 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3292 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3293 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3294 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3295 // of the collection set!). |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3296 release_mutator_alloc_region(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3297 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3298 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3299 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3300 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3301 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3302 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3303 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3304 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3305 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3306 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3307 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3308 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3309 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3310 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3311 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3312 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3313 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3314 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3315 #endif // YOUNG_LIST_VERBOSE |
342 | 3316 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3317 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3318 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3319 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3320 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3321 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3322 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3323 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3324 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3325 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3326 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3327 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3328 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3329 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3330 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3331 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3332 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3333 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3334 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3335 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3336 |
3378
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3337 if (mark_in_progress()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3338 concurrent_mark()->newCSet(); |
3378
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3339 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3340 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3341 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3342 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3343 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3344 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3345 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3346 |
1707 | 3347 g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3348 |
3378
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3349 // We have chosen the complete collection set. If marking is |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3350 // active then, we clear the region fields of any of the |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3351 // concurrent marking tasks whose region fields point into |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3352 // the collection set as these values will become stale. This |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3353 // will cause the owning marking threads to claim a new region |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3354 // when marking restarts. |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3355 if (mark_in_progress()) { |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3356 concurrent_mark()->reset_active_task_region_fields_in_cset(); |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3357 } |
69293e516993
7041440: G1: assert(obj->is_oop_or_null(true )) failed: Error #
johnc
parents:
3377
diff
changeset
|
3358 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3359 // Nothing to do if we were unable to choose a collection set. |
342 | 3360 #if G1_REM_SET_LOGGING |
1707 | 3361 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
3362 print(); | |
342 | 3363 #endif |
1707 | 3364 PrepareForRSScanningClosure prepare_for_rs_scan; |
3365 collection_set_iterate(&prepare_for_rs_scan); | |
3366 | |
3367 setup_surviving_young_words(); | |
3368 | |
3369 // Set up the gc allocation regions. | |
3370 get_gc_alloc_regions(); | |
3371 | |
3372 // Actually do the work... | |
3373 evacuate_collection_set(); | |
3374 | |
3375 free_collection_set(g1_policy()->collection_set()); | |
3376 g1_policy()->clear_collection_set(); | |
3377 | |
3378 cleanup_surviving_young_words(); | |
3379 | |
3380 // Start a new incremental collection set for the next pause. | |
3381 g1_policy()->start_incremental_cset_building(); | |
3382 | |
3383 // Clear the _cset_fast_test bitmap in anticipation of adding | |
3384 // regions to the incremental collection set for the next | |
3385 // evacuation pause. | |
3386 clear_cset_fast_test(); | |
3387 | |
3388 if (g1_policy()->in_young_gc_mode()) { | |
3389 _young_list->reset_sampled_info(); | |
3390 | |
3391 // Don't check the whole heap at this point as the | |
3392 // GC alloc regions from this pause have been tagged | |
3393 // as survivors and moved on to the survivor list. | |
3394 // Survivor regions will fail the !is_young() check. | |
3395 assert(check_young_list_empty(false /* check_heap */), | |
3396 "young list should be empty"); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3397 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3398 #if YOUNG_LIST_VERBOSE |
1707 | 3399 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
3400 _young_list->print(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3401 #endif // YOUNG_LIST_VERBOSE |
342 | 3402 |
1707 | 3403 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3404 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3405 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3406 |
1707 | 3407 _young_list->reset_auxilary_lists(); |
342 | 3408 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3409 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3410 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3411 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3412 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3413 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3414 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3415 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3416 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3417 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3418 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3419 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3420 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3421 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3422 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3423 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3424 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3425 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3426 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3427 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3428 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3429 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3430 } |
342 | 3431 |
3285
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
3432 allocate_dummy_regions(); |
49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
tonyp
parents:
2433
diff
changeset
|
3433 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3434 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3435 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3436 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3437 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3438 #endif // YOUNG_LIST_VERBOSE |
342 | 3439 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3440 init_mutator_alloc_region(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3441 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3442 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3443 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3444 g1_policy()->record_pause_time_ms(pause_time_ms); |
1707 | 3445 g1_policy()->record_collection_pause_end(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3446 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3447 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3448 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3449 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3450 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3451 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3452 prepare_for_verify(); |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3453 Universe::verify(/* allow dirty */ true, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3454 /* silent */ false, |
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3766
diff
changeset
|
3455 /* option */ VerifyOption_G1UsePrevMarking); |
342 | 3456 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3457 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3458 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3459 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3460 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3461 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3462 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3463 size_t bytes_before = capacity(); |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3464 if (!expand(expand_bytes)) { |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3465 // We failed to expand the heap so let's verify that |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3466 // committed/uncommitted amount match the backing store |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3467 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3468 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); |
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3469 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3470 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3471 } |
3764
053d84a76d3d
7032531: G1: enhance GC logging to include more accurate eden / survivor size transitions
tonyp
parents:
3378
diff
changeset
|
3472 // We have to do this after we decide whether to expand the heap or not. |
053d84a76d3d
7032531: G1: enhance GC logging to include more accurate eden / survivor size transitions
tonyp
parents:
3378
diff
changeset
|
3473 g1_policy()->print_heap_transition(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3474 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3475 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3476 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3477 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3478 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3479 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3480 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3481 #endif |
342 | 3482 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3483 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3484 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3485 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3486 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3487 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3488 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3489 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3490 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3491 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3492 |
3766 | 3493 _hrs.verify_optional(); |
2152 | 3494 verify_region_sets_optional(); |
3495 | |
1709 | 3496 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
3497 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | |
3498 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3499 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3500 Universe::print_heap_after_gc(); |
342 | 3501 } |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
3502 g1mm()->update_counters(); |
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
3503 |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3504 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3505 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3506 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3507 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3508 } |
1973 | 3509 |
3510 return true; | |
342 | 3511 } |
3512 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3513 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3514 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3515 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3516 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3517 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3518 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3519 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3520 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3521 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3522 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3523 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3524 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3525 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3526 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3527 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3528 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3529 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3530 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3531 void G1CollectedHeap::init_mutator_alloc_region() { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3532 assert(_mutator_alloc_region.get() == NULL, "pre-condition"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3533 _mutator_alloc_region.init(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3534 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3535 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3536 void G1CollectedHeap::release_mutator_alloc_region() { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3537 _mutator_alloc_region.release(); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3538 assert(_mutator_alloc_region.get() == NULL, "post-condition"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
3539 } |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3540 |
342 | 3541 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
3542 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 3543 // make sure we don't call set_gc_alloc_region() multiple times on |
3544 // the same region | |
3545 assert(r == NULL || !r->is_gc_alloc_region(), | |
3546 "shouldn't already be a GC alloc region"); | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3547 assert(r == NULL || !r->isHumongous(), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3548 "humongous regions shouldn't be used as GC alloc regions"); |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3549 |
342 | 3550 HeapWord* original_top = NULL; |
3551 if (r != NULL) | |
3552 original_top = r->top(); | |
3553 | |
3554 // We will want to record the used space in r as being there before gc. | |
3555 // One we install it as a GC alloc region it's eligible for allocation. | |
3556 // So record it now and use it later. | |
3557 size_t r_used = 0; | |
3558 if (r != NULL) { | |
3559 r_used = r->used(); | |
3560 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
3561 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 3562 // need to take the lock to guard against two threads calling |
3563 // get_gc_alloc_region concurrently (very unlikely but...) | |
3564 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3565 r->save_marks(); | |
3566 } | |
3567 } | |
3568 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
3569 _gc_alloc_regions[purpose] = r; | |
3570 if (old_alloc_region != NULL) { | |
3571 // Replace aliases too. | |
3572 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3573 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
3574 _gc_alloc_regions[ap] = r; | |
3575 } | |
3576 } | |
3577 } | |
3578 if (r != NULL) { | |
3579 push_gc_alloc_region(r); | |
3580 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
3581 // We are using a region as a GC alloc region after it has been used | |
3582 // as a mutator allocation region during the current marking cycle. | |
3583 // The mutator-allocated objects are currently implicitly marked, but | |
3584 // when we move hr->next_top_at_mark_start() forward at the the end | |
3585 // of the GC pause, they won't be. We therefore mark all objects in | |
3586 // the "gap". We do this object-by-object, since marking densely | |
3587 // does not currently work right with marking bitmap iteration. This | |
3588 // means we rely on TLAB filling at the start of pauses, and no | |
3589 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
3590 // to fix the marking bitmap iteration. | |
3591 HeapWord* curhw = r->next_top_at_mark_start(); | |
3592 HeapWord* t = original_top; | |
3593 | |
3594 while (curhw < t) { | |
3595 oop cur = (oop)curhw; | |
3596 // We'll assume parallel for generality. This is rare code. | |
3597 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
3598 curhw = curhw + cur->size(); | |
3599 } | |
3600 assert(curhw == t, "Should have parsed correctly."); | |
3601 } | |
3602 if (G1PolicyVerbose > 1) { | |
3603 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
3604 "for survivors:", r->bottom(), original_top, r->end()); | |
3605 r->print(); | |
3606 } | |
3607 g1_policy()->record_before_bytes(r_used); | |
3608 } | |
3609 } | |
3610 | |
3611 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
3612 assert(Thread::current()->is_VM_thread() || | |
2152 | 3613 FreeList_lock->owned_by_self(), "Precondition"); |
342 | 3614 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), |
3615 "Precondition."); | |
3616 hr->set_is_gc_alloc_region(true); | |
3617 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
3618 _gc_alloc_region_list = hr; | |
3619 } | |
3620 | |
3621 #ifdef G1_DEBUG | |
3622 class FindGCAllocRegion: public HeapRegionClosure { | |
3623 public: | |
3624 bool doHeapRegion(HeapRegion* r) { | |
3625 if (r->is_gc_alloc_region()) { | |
3766 | 3626 gclog_or_tty->print_cr("Region "HR_FORMAT" is still a GC alloc region", |
3627 HR_FORMAT_PARAMS(r)); | |
342 | 3628 } |
3629 return false; | |
3630 } | |
3631 }; | |
3632 #endif // G1_DEBUG | |
3633 | |
3634 void G1CollectedHeap::forget_alloc_region_list() { | |
2152 | 3635 assert_at_safepoint(true /* should_be_vm_thread */); |
342 | 3636 while (_gc_alloc_region_list != NULL) { |
3637 HeapRegion* r = _gc_alloc_region_list; | |
3638 assert(r->is_gc_alloc_region(), "Invariant."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3639 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3640 // newly allocated data in order to be able to apply deferred updates |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3641 // before the GC is done for verification purposes (i.e to allow |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3642 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3643 // collection. |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3644 r->ContiguousSpace::set_saved_mark(); |
342 | 3645 _gc_alloc_region_list = r->next_gc_alloc_region(); |
3646 r->set_next_gc_alloc_region(NULL); | |
3647 r->set_is_gc_alloc_region(false); | |
545 | 3648 if (r->is_survivor()) { |
3649 if (r->is_empty()) { | |
3650 r->set_not_young(); | |
3651 } else { | |
3652 _young_list->add_survivor_region(r); | |
3653 } | |
3654 } | |
342 | 3655 } |
3656 #ifdef G1_DEBUG | |
3657 FindGCAllocRegion fa; | |
3658 heap_region_iterate(&fa); | |
3659 #endif // G1_DEBUG | |
3660 } | |
3661 | |
3662 | |
3663 bool G1CollectedHeap::check_gc_alloc_regions() { | |
3664 // TODO: allocation regions check | |
3665 return true; | |
3666 } | |
3667 | |
3668 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 3669 // First, let's check that the GC alloc region list is empty (it should) |
3670 assert(_gc_alloc_region_list == NULL, "invariant"); | |
3671 | |
342 | 3672 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 3673 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3674 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
636 | 3675 |
342 | 3676 // Create new GC alloc regions. |
636 | 3677 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
3678 _retained_gc_alloc_regions[ap] = NULL; | |
3679 | |
3680 if (alloc_region != NULL) { | |
3681 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
3682 | |
3683 // let's make sure that the GC alloc region is not tagged as such | |
3684 // outside a GC operation | |
3685 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
3686 | |
3687 if (alloc_region->in_collection_set() || | |
3688 alloc_region->top() == alloc_region->end() || | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3689 alloc_region->top() == alloc_region->bottom() || |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3690 alloc_region->isHumongous()) { |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3691 // we will discard the current GC alloc region if |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3692 // * it's in the collection set (it can happen!), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3693 // * it's already full (no point in using it), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3694 // * it's empty (this means that it was emptied during |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3695 // a cleanup and it should be on the free list now), or |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3696 // * it's humongous (this means that it was emptied |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3697 // during a cleanup and was added to the free list, but |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3698 // has been subseqently used to allocate a humongous |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3699 // object that may be less than the region size). |
636 | 3700 |
3701 alloc_region = NULL; | |
3702 } | |
3703 } | |
3704 | |
3705 if (alloc_region == NULL) { | |
3706 // we will get a new GC alloc region | |
2188
c33825b68624
6923430: G1: assert(res != 0,"This should have worked.")
johnc
parents:
2173
diff
changeset
|
3707 alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3708 } else { |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3709 // the region was retained from the last collection |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3710 ++_gc_alloc_region_counts[ap]; |
1388 | 3711 if (G1PrintHeapRegions) { |
3766 | 3712 gclog_or_tty->print_cr("new alloc region "HR_FORMAT, |
3713 HR_FORMAT_PARAMS(alloc_region)); | |
1388 | 3714 } |
342 | 3715 } |
636 | 3716 |
342 | 3717 if (alloc_region != NULL) { |
636 | 3718 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 3719 set_gc_alloc_region(ap, alloc_region); |
3720 } | |
636 | 3721 |
3722 assert(_gc_alloc_regions[ap] == NULL || | |
3723 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
3724 "the GC alloc region should be tagged as such"); | |
3725 assert(_gc_alloc_regions[ap] == NULL || | |
3726 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
3727 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 3728 } |
3729 // Set alternative regions for allocation purposes that have reached | |
636 | 3730 // their limit. |
342 | 3731 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
3732 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
3733 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
3734 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
3735 } | |
3736 } | |
3737 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3738 } | |
3739 | |
636 | 3740 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 3741 // We keep a separate list of all regions that have been alloc regions in |
636 | 3742 // the current collection pause. Forget that now. This method will |
3743 // untag the GC alloc regions and tear down the GC alloc region | |
3744 // list. It's desirable that no regions are tagged as GC alloc | |
3745 // outside GCs. | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3746 |
342 | 3747 forget_alloc_region_list(); |
3748 | |
3749 // The current alloc regions contain objs that have survived | |
3750 // collection. Make them no longer GC alloc regions. | |
3751 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3752 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 3753 _retained_gc_alloc_regions[ap] = NULL; |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3754 _gc_alloc_region_counts[ap] = 0; |
636 | 3755 |
3756 if (r != NULL) { | |
3757 // we retain nothing on _gc_alloc_regions between GCs | |
3758 set_gc_alloc_region(ap, NULL); | |
3759 | |
3760 if (r->is_empty()) { | |
2152 | 3761 // We didn't actually allocate anything in it; let's just put |
3762 // it back on the free list. | |
2432
455328d90876
7029458: G1: Add newly-reclaimed regions to the beginning of the region free list, not the end
tonyp
parents:
2369
diff
changeset
|
3763 _free_list.add_as_head(r); |
636 | 3764 } else if (_retain_gc_alloc_region[ap] && !totally) { |
3765 // retain it so that we can use it at the beginning of the next GC | |
3766 _retained_gc_alloc_regions[ap] = r; | |
342 | 3767 } |
3768 } | |
636 | 3769 } |
3770 } | |
3771 | |
3772 #ifndef PRODUCT | |
3773 // Useful for debugging | |
3774 | |
3775 void G1CollectedHeap::print_gc_alloc_regions() { | |
3776 gclog_or_tty->print_cr("GC alloc regions"); | |
3777 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3778 HeapRegion* r = _gc_alloc_regions[ap]; | |
3779 if (r == NULL) { | |
3780 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3781 } else { | |
3782 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3783 ap, r->bottom(), r->used()); | |
3784 } | |
3785 } | |
3786 } | |
3787 #endif // PRODUCT | |
342 | 3788 |
3789 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3790 _drain_in_progress = false; | |
3791 set_evac_failure_closure(cl); | |
3792 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3793 } | |
3794 | |
3795 void G1CollectedHeap::finalize_for_evac_failure() { | |
3796 assert(_evac_failure_scan_stack != NULL && | |
3797 _evac_failure_scan_stack->length() == 0, | |
3798 "Postcondition"); | |
3799 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3800 delete _evac_failure_scan_stack; |
342 | 3801 _evac_failure_scan_stack = NULL; |
3802 } | |
3803 | |
3804 | |
3805 | |
3806 // *** Sequential G1 Evacuation | |
3807 | |
3808 class G1IsAliveClosure: public BoolObjectClosure { | |
3809 G1CollectedHeap* _g1; | |
3810 public: | |
3811 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3812 void do_object(oop p) { assert(false, "Do not call."); } | |
3813 bool do_object_b(oop p) { | |
3814 // It is reachable if it is outside the collection set, or is inside | |
3815 // and forwarded. | |
3816 | |
3817 #ifdef G1_DEBUG | |
3818 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3819 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3820 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3821 #endif // G1_DEBUG | |
3822 | |
3823 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3824 } | |
3825 }; | |
3826 | |
3827 class G1KeepAliveClosure: public OopClosure { | |
3828 G1CollectedHeap* _g1; | |
3829 public: | |
3830 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3831 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3832 void do_oop( oop* p) { |
342 | 3833 oop obj = *p; |
3834 #ifdef G1_DEBUG | |
3835 if (PrintGC && Verbose) { | |
3836 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3837 p, (void*) obj, (void*) *p); | |
3838 } | |
3839 #endif // G1_DEBUG | |
3840 | |
3841 if (_g1->obj_in_cs(obj)) { | |
3842 assert( obj->is_forwarded(), "invariant" ); | |
3843 *p = obj->forwardee(); | |
3844 #ifdef G1_DEBUG | |
3845 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3846 (void*) obj, (void*) *p); | |
3847 #endif // G1_DEBUG | |
3848 } | |
3849 } | |
3850 }; | |
3851 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
// Closure used when remembered-set updates are deferred
// (G1DeferredRSUpdate): instead of updating RSets immediately, mark
// the card covering the interesting slot as "deferred" in the card
// table and enqueue it on a dirty card queue for later processing.
// _from (inherited from OopsInHeapRegionClosure) is the region
// currently being scanned.
class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  DirtyCardQueue *_dcq;      // queue the deferred cards are pushed onto
  CardTableModRefBS* _ct_bs; // card table used to locate/mark cards

public:
  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
    // Only cross-region references are interesting for the RSet, and
    // references from survivor regions are handled separately.
    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
        !_from->is_survivor()) {
      size_t card_index = _ct_bs->index_for(p);
      // mark_card_deferred() returns true only for the first marker of
      // this card, so each card is enqueued at most once.
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3875 |
// Object closure applied to every object in a region for which
// evacuation failed. Self-forwarded objects (the ones that failed to
// move) have their headers restored and are marked live in the "prev"
// bitmap; objects that were successfully evacuated (or are dead) are
// overwritten with filler objects. The BOT is refined as we go.
class RemoveSelfPointerClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;               // the evacuation-failed region being scanned
  size_t _prev_marked_bytes;     // bytes marked live in the "prev" bitmap
  size_t _next_marked_bytes;     // bytes marked live in the "next" bitmap
  OopsInHeapRegionClosure *_cl;  // RSet update closure (immediate or deferred)
public:
  RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr,
                           OopsInHeapRegionClosure* cl) :
    _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
    _next_marked_bytes(0), _cl(cl) {}

  size_t prev_marked_bytes() { return _prev_marked_bytes; }
  size_t next_marked_bytes() { return _next_marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular if there were two TLABs, one of them partially refined.
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of the TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    // Refine the (previously reset) BOT for this object.
    _hr->update_bot_for_object(obj_addr, obj_size);
    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.
      assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
      _cm->markPrev(obj);
      assert(_cm->isPrevMarked(obj), "Should be marked!");
      _prev_marked_bytes += (obj_size * HeapWordSize);
      if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
        _cm->markAndGrayObjectIfNecessary(obj);
      }
      // Restore the header clobbered by the self-forwarding pointer.
      // (Preserved marks, if any, are reinstated later by the caller.)
      obj->set_mark(markOopDesc::prototype());
      // While we were processing RSet buffers during the
      // collection, we actually didn't scan any cards on the
      // collection set, since we didn't want to update remembered
      // sets with entries that point into the collection set, given
      // that live objects from the collection set are about to move
      // and such entries will be stale very soon. This change also
      // dealt with a reliability issue which involved scanning a
      // card in the collection set and coming across an array that
      // was being chunked and looking malformed. The problem is
      // that, if evacuation fails, we might have remembered set
      // entries missing given that we skipped cards on the
      // collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_cl);
      assert(_cm->isPrevMarked(obj), "Should be marked!");
    } else {
      // The object has been either evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr((HeapWord*)obj, obj_size);
      CollectedHeap::fill_with_object(mr);
      _cm->clearRangeBothMaps(mr);
    }
  }
};
3947 | |
// After an evacuation pause in which some regions failed to evacuate,
// walk the collection set, restore the headers of self-forwarded
// objects, fill evacuated holes, repair marking info (TAMS / marked
// bytes), recreate remembered-set entries, and finally reinstate any
// object marks that were preserved at failure time.
void G1CollectedHeap::remove_self_forwarding_pointers() {
  UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
  DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
  UpdateRSetDeferred deferred_update(_g1h, &dcq);
  OopsInHeapRegionClosure *cl;
  // Pick the RSet update policy matching the G1DeferredRSUpdate flag.
  if (G1DeferredRSUpdate) {
    cl = &deferred_update;
  } else {
    cl = &immediate_update;
  }
  HeapRegion* cur = g1_policy()->collection_set();
  while (cur != NULL) {
    assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
    assert(!cur->isHumongous(), "sanity");

    if (cur->evacuation_failed()) {
      assert(cur->in_collection_set(), "bad CS");
      RemoveSelfPointerClosure rspc(_g1h, cur, cl);

      // The closure refines BOT entries as it iterates, so start from
      // a clean BOT for this region.
      cur->reset_bot();
      cl->set_region(cur);
      cur->object_iterate(&rspc);

      // A number of manipulations to make the TAMS be the current top,
      // and the marked bytes be the ones observed in the iteration.
      if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
        // The comments below are the postconditions achieved by the
        // calls.  Note especially the last such condition, which says that
        // the count of marked bytes has been properly restored.
        cur->note_start_of_marking(false);
        // _next_top_at_mark_start == top, _next_marked_bytes == 0
        cur->add_to_marked_bytes(rspc.prev_marked_bytes());
        // _next_marked_bytes == prev_marked_bytes.
        cur->note_end_of_marking();
        // _prev_top_at_mark_start == top(),
        // _prev_marked_bytes == prev_marked_bytes
      }
      // If there is no mark in progress, we modified the _next variables
      // above needlessly, but harmlessly.
      if (_g1h->mark_in_progress()) {
        cur->note_start_of_marking(false);
        // _next_top_at_mark_start == top, _next_marked_bytes == 0
        // _next_marked_bytes == next_marked_bytes.
      }

      // Now make sure the region has the right index in the sorted array.
      g1_policy()->note_change_in_marked_bytes(cur);
    }
    cur = cur->next_in_collection_set();
  }
  assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");

  // Now restore saved marks, if any.
  if (_objs_with_preserved_marks != NULL) {
    assert(_preserved_marks_of_objs != NULL, "Both or none.");
    guarantee(_objs_with_preserved_marks->length() ==
              _preserved_marks_of_objs->length(), "Both or none.");
    // The two arrays are parallel: entry i of one holds the object,
    // entry i of the other its original mark word.
    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
      oop obj = _objs_with_preserved_marks->at(i);
      markOop m = _preserved_marks_of_objs->at(i);
      obj->set_mark(m);
    }
    // Delete the preserved marks growable arrays (allocated on the C heap).
    delete _objs_with_preserved_marks;
    delete _preserved_marks_of_objs;
    _objs_with_preserved_marks = NULL;
    _preserved_marks_of_objs = NULL;
  }
}
4017 | |
// Push an evacuation-failed object onto the scan stack so its fields
// are (re)scanned by drain_evac_failure_scan_stack().
void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  _evac_failure_scan_stack->push(obj);
}
4021 | |
4022 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
4023 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
4024 | |
4025 while (_evac_failure_scan_stack->length() > 0) { | |
4026 oop obj = _evac_failure_scan_stack->pop(); | |
4027 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
4028 obj->oop_iterate_backwards(_evac_failure_closure); | |
4029 } | |
4030 } | |
4031 | |
// Parallel evacuation-failure handler: atomically self-forward 'old'
// and, if this thread wins the race, do the common failure handling
// (possibly taking the EvacFailureStack_lock to install this thread's
// closure as the global one). Returns the object's final location:
// 'old' itself on self-forwarding, otherwise whatever it was already
// forwarded to by a competing thread.
oop
G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
                                               oop old) {
  assert(obj_in_cs(old),
         err_msg("obj: "PTR_FORMAT" should still be in the CSet",
                 (HeapWord*) old));
  // Capture the mark word before the CAS below may clobber it.
  markOop m = old->mark();
  oop forward_ptr = old->forward_to_atomic(old);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded.
    if (_evac_failure_closure != cl) {
      MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
      assert(!_drain_in_progress,
             "Should only be true while someone holds the lock.");
      // Set the global evac-failure closure to the current thread's.
      assert(_evac_failure_closure == NULL, "Or locking has failed.");
      set_evac_failure_closure(cl);
      // Now do the common part.
      handle_evacuation_failure_common(old, m);
      // Reset to NULL.
      set_evac_failure_closure(NULL);
    } else {
      // The lock is already held, and this is recursive.
      assert(_drain_in_progress, "This should only be the recursive case.");
      handle_evacuation_failure_common(old, m);
    }
    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !obj_in_cs(forward_ptr),
           err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
                   "should not be in the CSet",
                   (HeapWord*) old, (HeapWord*) forward_ptr));
    return forward_ptr;
  }
}
4070 | |
// Common part of evacuation-failure handling (called with this
// thread's closure installed as the global evac-failure closure):
// record the failure globally and per-region, preserve the object's
// mark if needed, and queue the object for field re-scanning. The
// _drain_in_progress flag makes nested calls (from within the drain)
// just push and return, preventing recursion.
void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  set_evacuation_failed(true);

  preserve_mark_if_necessary(old, m);

  HeapRegion* r = heap_region_containing(old);
  // Flag the region (and optionally log) only on its first failure.
  if (!r->evacuation_failed()) {
    r->set_evacuation_failed(true);
    if (G1PrintHeapRegions) {
      gclog_or_tty->print("overflow in heap region "PTR_FORMAT" "
                          "["PTR_FORMAT","PTR_FORMAT")\n",
                          r, r->bottom(), r->end());
    }
  }

  push_on_evac_failure_scan_stack(old);

  if (!_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _drain_in_progress = true;
    drain_evac_failure_scan_stack();
    _drain_in_progress = false;
  }
}
4095 | |
// Save the original mark word of an evacuation-failed object if it
// cannot be reconstructed later (e.g. it is not the default
// prototype). The object/mark pairs are kept in two parallel
// C-heap-allocated growable arrays, created lazily on first use and
// consumed by remove_self_forwarding_pointers().
void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  assert(evacuation_failed(), "Oversaving!");
  // We want to call the "for_promotion_failure" version only in the
  // case of a promotion failure.
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    if (_objs_with_preserved_marks == NULL) {
      assert(_preserved_marks_of_objs == NULL, "Both or none.");
      // Lazily allocate both arrays together so the "both or none"
      // invariant always holds.
      _objs_with_preserved_marks =
        new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
      _preserved_marks_of_objs =
        new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
    }
    _objs_with_preserved_marks->push(obj);
    _preserved_marks_of_objs->push(m);
  }
}
4112 | |
4113 // *** Parallel G1 Evacuation | |
4114 | |
4115 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
4116 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4117 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4118 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4119 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4120 |
342 | 4121 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
4122 // let the caller handle alloc failure | |
4123 if (alloc_region == NULL) return NULL; | |
4124 | |
4125 HeapWord* block = alloc_region->par_allocate(word_size); | |
4126 if (block == NULL) { | |
4127 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
4128 } | |
4129 return block; | |
4130 } | |
4131 | |
// Finish using a GC alloc region: in the parallel case, first fill the
// remaining space so no concurrent allocation can race with the
// finalization below, then record post-copy bookkeeping.
void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
                                          bool par) {
  // Another thread might have obtained alloc_region for the given
  // purpose, and might be attempting to allocate in it, and might
  // succeed.  Therefore, we can't do the "finalization" stuff on the
  // region below until we're sure the last allocation has happened.
  // We ensure this by allocating the remaining space with a garbage
  // object.
  if (par) par_allocate_remaining_space(alloc_region);
  // Now we can do the post-GC stuff on the region.
  alloc_region->note_end_of_copying();
  g1_policy()->record_after_bytes(alloc_region->used());
}
4145 | |
// Slow-path GC allocation: serialized under FreeList_lock. Re-checks
// whether another thread already installed a new region, retires the
// full region, and either (a) aliases an alternative purpose's region
// when this purpose has hit its region budget, (b) allocates a brand
// new GC alloc region, or (c) gives up and returns NULL.
HeapWord*
G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
                                         HeapRegion* alloc_region,
                                         bool par,
                                         size_t word_size) {
  assert(!isHumongous(word_size),
         err_msg("we should not be seeing humongous allocation requests "
                 "during GC, word_size = "SIZE_FORMAT, word_size));

  // We need to make sure we serialize calls to this method. Given
  // that the FreeList_lock guards accesses to the free_list anyway,
  // and we need to potentially remove a region from it, we'll use it
  // to protect the whole call.
  MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);

  HeapWord* block = NULL;
  // In the parallel case, a previous thread to obtain the lock may have
  // already assigned a new gc_alloc_region.
  if (alloc_region != _gc_alloc_regions[purpose]) {
    assert(par, "But should only happen in parallel case.");
    alloc_region = _gc_alloc_regions[purpose];
    if (alloc_region == NULL) return NULL;
    block = alloc_region->par_allocate(word_size);
    if (block != NULL) return block;
    // Otherwise, continue; this new region is empty, too.
  }
  assert(alloc_region != NULL, "We better have an allocation region");
  retire_alloc_region(alloc_region, par);

  if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
    // Cannot allocate more regions for the given purpose.
    GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
    // Is there an alternative?
    if (purpose != alt_purpose) {
      HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
      // Has not the alternative region been aliased?
      if (alloc_region != alt_region && alt_region != NULL) {
        // Try to allocate in the alternative region.
        if (par) {
          block = alt_region->par_allocate(word_size);
        } else {
          block = alt_region->allocate(word_size);
        }
        // Make an alias.
        _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
        if (block != NULL) {
          return block;
        }
        retire_alloc_region(alt_region, par);
      }
      // Both the allocation region and the alternative one are full
      // and aliased, replace them with a new allocation region.
      purpose = alt_purpose;
    } else {
      // No alternative purpose exists; clear the region and fail.
      set_gc_alloc_region(purpose, NULL);
      return NULL;
    }
  }

  // Now allocate a new region for allocation.
  alloc_region = new_gc_alloc_region(purpose, word_size);

  // let the caller handle alloc failure
  if (alloc_region != NULL) {

    assert(check_gc_alloc_regions(), "alloc regions messed up");
    assert(alloc_region->saved_mark_at_top(),
           "Mark should have been saved already.");
    // This must be done last: once it's installed, other threads may
    // allocate in it (without holding the lock.)
    set_gc_alloc_region(purpose, alloc_region);

    if (par) {
      block = alloc_region->par_allocate(word_size);
    } else {
      block = alloc_region->allocate(word_size);
    }
    // Caller handles alloc failure.
  } else {
    // This sets other apis using the same old alloc region to NULL, also.
    set_gc_alloc_region(purpose, NULL);
  }
  return block;  // May be NULL.
}
4230 | |
4231 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
4232 HeapWord* block = NULL; | |
4233 size_t free_words; | |
4234 do { | |
4235 free_words = r->free()/HeapWordSize; | |
4236 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
4237 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 4238 // Otherwise, try to claim it. |
4239 block = r->par_allocate(free_words); | |
4240 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
4241 fill_with_object(block, free_words); |
342 | 4242 } |
4243 | |
4244 #ifndef PRODUCT | |
4245 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
4246 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
4247 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
4248 return true; | |
4249 } | |
4250 #endif // PRODUCT | |
4251 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4252 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4253 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4254 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4255 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4256 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4257 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4258 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4259 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4260 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4261 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4262 _age_table(false), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4263 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4264 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4265 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4266 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4267 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4268 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4269 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4270 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4271 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4272 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4273 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4274 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4275 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4276 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4277 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4278 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4279 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4280 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4281 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4282 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4283 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4284 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4285 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4286 } |
342 | 4287 |
1709 | 4288 void |
4289 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) | |
4290 { | |
4291 st->print_raw_cr("GC Termination Stats"); | |
4292 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" | |
4293 " ------waste (KiB)------"); | |
4294 st->print_raw_cr("thr ms ms % ms % attempts" | |
4295 " total alloc undo"); | |
4296 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" | |
4297 " ------- ------- -------"); | |
4298 } | |
4299 | |
4300 void | |
4301 G1ParScanThreadState::print_termination_stats(int i, | |
4302 outputStream* const st) const | |
4303 { | |
4304 const double elapsed_ms = elapsed_time() * 1000.0; | |
4305 const double s_roots_ms = strong_roots_time() * 1000.0; | |
4306 const double term_ms = term_time() * 1000.0; | |
4307 st->print_cr("%3d %9.2f %9.2f %6.2f " | |
4308 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " | |
4309 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), | |
4310 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, | |
4311 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), | |
4312 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, | |
4313 alloc_buffer_waste() * HeapWordSize / K, | |
4314 undo_waste() * HeapWordSize / K); | |
4315 } | |
4316 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4317 #ifdef ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4318 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4319 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4320 assert(UseCompressedOops, "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4321 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4322 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4323 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4324 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4325 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4326 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4327 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4328 bool G1ParScanThreadState::verify_ref(oop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4329 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4330 if (has_partial_array_mask(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4331 // Must be in the collection set--it's already been copied. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4332 oop p = clear_partial_array_mask(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4333 assert(_g1h->obj_in_cs(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4334 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4335 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4336 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4337 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4338 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4339 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4340 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4341 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4342 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4343 bool G1ParScanThreadState::verify_task(StarTask ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4344 if (ref.is_narrow()) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4345 return verify_ref((narrowOop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4346 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4347 return verify_ref((oop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4348 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4349 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4350 #endif // ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4351 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4352 void G1ParScanThreadState::trim_queue() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4353 StarTask ref; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4354 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4355 // Drain the overflow stack first, so other threads can steal. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4356 while (refs()->pop_overflow(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4357 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4358 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4359 while (refs()->pop_local(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4360 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4361 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4362 } while (!refs()->is_empty()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4363 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4364 |
342 | 4365 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
4366 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
4367 _par_scan_state(par_scan_state) { } | |
4368 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4369 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 4370 // This is called _after_ do_oop_work has been called, hence after |
4371 // the object has been relocated to its new location and *p points | |
4372 // to its new location. | |
4373 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4374 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4375 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4376 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4377 HeapWord* addr = (HeapWord*)obj; |
3323
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4378 if (_g1->is_in_g1_reserved(addr)) { |
342 | 4379 _cm->grayRoot(oop(addr)); |
3323
75af3e8de182
7040450: G1: assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj))) failed: shouldn't still be in ...
tonyp
parents:
3317
diff
changeset
|
4380 } |
342 | 4381 } |
4382 } | |
4383 | |
4384 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
4385 size_t word_sz = old->size(); | |
4386 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
4387 // +1 to make the -1 indexes valid... | |
4388 int young_index = from_region->young_index_in_cset()+1; | |
4389 assert( (from_region->is_young() && young_index > 0) || | |
4390 (!from_region->is_young() && young_index == 0), "invariant" ); | |
4391 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
4392 markOop m = old->mark(); | |
545 | 4393 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
4394 : m->age(); | |
4395 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 4396 word_sz); |
4397 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
4398 oop obj = oop(obj_ptr); | |
4399 | |
4400 if (obj_ptr == NULL) { | |
4401 // This will either forward-to-self, or detect that someone else has | |
4402 // installed a forwarding pointer. | |
4403 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4404 return _g1->handle_evacuation_failure_par(cl, old); | |
4405 } | |
4406 | |
526 | 4407 // We're going to allocate linearly, so might as well prefetch ahead. |
4408 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
4409 | |
342 | 4410 oop forward_ptr = old->forward_to_atomic(obj); |
4411 if (forward_ptr == NULL) { | |
4412 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 4413 if (g1p->track_object_age(alloc_purpose)) { |
4414 // We could simply do obj->incr_age(). However, this causes a | |
4415 // performance issue. obj->incr_age() will first check whether | |
4416 // the object has a displaced mark by checking its mark word; | |
4417 // getting the mark word from the new location of the object | |
4418 // stalls. So, given that we already have the mark word and we | |
4419 // are about to install it anyway, it's better to increase the | |
4420 // age on the mark word, when the object does not have a | |
4421 // displaced mark word. We're not expecting many objects to have | |
4422 // a displaced marked word, so that case is not optimized | |
4423 // further (it could be...) and we simply call obj->incr_age(). | |
4424 | |
4425 if (m->has_displaced_mark_helper()) { | |
4426 // in this case, we have to install the mark word first, | |
4427 // otherwise obj looks to be forwarded (the old mark word, | |
4428 // which contains the forward pointer, was copied) | |
4429 obj->set_mark(m); | |
4430 obj->incr_age(); | |
4431 } else { | |
4432 m = m->incr_age(); | |
545 | 4433 obj->set_mark(m); |
526 | 4434 } |
545 | 4435 _par_scan_state->age_table()->add(obj, word_sz); |
4436 } else { | |
4437 obj->set_mark(m); | |
526 | 4438 } |
4439 | |
342 | 4440 // preserve "next" mark bit |
4441 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
4442 if (!use_local_bitmaps || | |
4443 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
4444 // if we couldn't mark it on the local bitmap (this happens when | |
4445 // the object was not allocated in the GCLab), we have to bite | |
4446 // the bullet and do the standard parallel mark | |
4447 _cm->markAndGrayObjectIfNecessary(obj); | |
4448 } | |
4449 #if 1 | |
4450 if (_g1->isMarkedNext(old)) { | |
4451 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
4452 } | |
4453 #endif | |
4454 } | |
4455 | |
4456 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
4457 surv_young_words[young_index] += word_sz; | |
4458 | |
4459 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
4460 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4461 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4462 _par_scan_state->push_on_queue(old_p); |
342 | 4463 } else { |
526 | 4464 // No point in using the slower heap_region_containing() method, |
4465 // given that we know obj is in the heap. | |
4466 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 4467 obj->oop_iterate_backwards(_scanner); |
4468 } | |
4469 } else { | |
4470 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
4471 obj = forward_ptr; | |
4472 } | |
4473 return obj; | |
4474 } | |
4475 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4476 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4477 template <class T> |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4478 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4479 ::do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4480 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 4481 assert(barrier != G1BarrierRS || obj != NULL, |
4482 "Precondition: G1BarrierRS implies obj is nonNull"); | |
4483 | |
526 | 4484 // here the null check is implicit in the cset_fast_test() test |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4485 if (_g1->in_cset_fast_test(obj)) { |
342 | 4486 #if G1_REM_SET_LOGGING |
526 | 4487 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
4488 "into CS.", p, (void*) obj); | |
342 | 4489 #endif |
526 | 4490 if (obj->is_forwarded()) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4491 oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
526 | 4492 } else { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4493 oop copy_oop = copy_to_survivor_space(obj); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4494 oopDesc::encode_store_heap_oop(p, copy_oop); |
342 | 4495 } |
526 | 4496 // When scanning the RS, we only care about objs in CS. |
4497 if (barrier == G1BarrierRS) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4498 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 4499 } |
526 | 4500 } |
4501 | |
4502 if (barrier == G1BarrierEvac && obj != NULL) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4503 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 4504 } |
4505 | |
4506 if (do_gen_barrier && obj != NULL) { | |
4507 par_do_barrier(p); | |
4508 } | |
4509 } | |
4510 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4511 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4512 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4513 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4514 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
526 | 4515 assert(has_partial_array_mask(p), "invariant"); |
4516 oop old = clear_partial_array_mask(p); | |
342 | 4517 assert(old->is_objArray(), "must be obj array"); |
4518 assert(old->is_forwarded(), "must be forwarded"); | |
4519 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
4520 | |
4521 objArrayOop obj = objArrayOop(old->forwardee()); | |
4522 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
4523 // Process ParGCArrayScanChunk elements now | |
4524 // and push the remainder back onto queue | |
4525 int start = arrayOop(old)->length(); | |
4526 int end = obj->length(); | |
4527 int remainder = end - start; | |
4528 assert(start <= end, "just checking"); | |
4529 if (remainder > 2 * ParGCArrayScanChunk) { | |
4530 // Test above combines last partial chunk with a full chunk | |
4531 end = start + ParGCArrayScanChunk; | |
4532 arrayOop(old)->set_length(end); | |
4533 // Push remainder. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4534 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4535 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4536 _par_scan_state->push_on_queue(old_p); |
342 | 4537 } else { |
4538 // Restore length so that the heap remains parsable in | |
4539 // case of evacuation failure. | |
4540 arrayOop(old)->set_length(end); | |
4541 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4542 _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
342 | 4543 // process our set of indices (include header in first chunk) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4544 obj->oop_iterate_range(&_scanner, start, end); |
342 | 4545 } |
4546 | |
4547 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
4548 protected: | |
4549 G1CollectedHeap* _g1h; | |
4550 G1ParScanThreadState* _par_scan_state; | |
4551 RefToScanQueueSet* _queues; | |
4552 ParallelTaskTerminator* _terminator; | |
4553 | |
4554 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
4555 RefToScanQueueSet* queues() { return _queues; } | |
4556 ParallelTaskTerminator* terminator() { return _terminator; } | |
4557 | |
4558 public: | |
4559 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
4560 G1ParScanThreadState* par_scan_state, | |
4561 RefToScanQueueSet* queues, | |
4562 ParallelTaskTerminator* terminator) | |
4563 : _g1h(g1h), _par_scan_state(par_scan_state), | |
4564 _queues(queues), _terminator(terminator) {} | |
4565 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4566 void do_void(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4567 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4568 private: |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4569 inline bool offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4570 }; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4571 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4572 bool G1ParEvacuateFollowersClosure::offer_termination() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4573 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4574 pss->start_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4575 const bool res = terminator()->offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4576 pss->end_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4577 return res; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4578 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4579 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4580 void G1ParEvacuateFollowersClosure::do_void() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4581 StarTask stolen_task; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4582 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4583 pss->trim_queue(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4584 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4585 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4586 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4587 assert(pss->verify_task(stolen_task), "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4588 if (stolen_task.is_narrow()) { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4589 pss->deal_with_reference((narrowOop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4590 } else { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4591 pss->deal_with_reference((oop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4592 } |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4593 |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4594 // We've just processed a reference and we might have made |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4595 // available new entries on the queues. So we have to make sure |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4596 // we drain the queues as necessary. |
342 | 4597 pss->trim_queue(); |
4598 } | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4599 } while (!offer_termination()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4600 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4601 pss->retire_alloc_buffers(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4602 } |
342 | 4603 |
// The gang task executed by the GC worker threads during an evacuation
// pause.  Each worker sets up its own per-thread scan state and closures,
// processes its share of the strong roots, and then drains/steals scan
// entries until parallel termination.
class G1ParTask : public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  RefToScanQueueSet *_queues;
  ParallelTaskTerminator _terminator;
  int _n_workers;                 // number of workers actually participating

  // Serializes the verbose per-thread termination statistics output.
  Mutex _stats_lock;
  Mutex* stats_lock() { return &_stats_lock; }

  // Number of BOT cards covering the current heap capacity (rounded up).
  // NOTE(review): not referenced elsewhere in this class — presumably used
  // by subclasses or kept for diagnostics; confirm before removing.
  size_t getNCards() {
    return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
      / G1BlockOffsetSharedArray::N_bytes;
  }

public:
  G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
    : AbstractGangTask("G1 collection"),
      _g1h(g1h),
      _queues(task_queues),
      _terminator(workers, _queues),
      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
      _n_workers(workers)
  {}

  RefToScanQueueSet* queues() { return _queues; }

  RefToScanQueue *work_queue(int i) {
    return queues()->queue(i);
  }

  // Per-worker entry point; i is the worker id.
  void work(int i) {
    if (i >= _n_workers) return; // no work needed this round

    double start_time_ms = os::elapsedTime() * 1000.0;
    _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);

    ResourceMark rm;
    HandleMark   hm;

    // Per-thread evacuation state plus the copy/evac-failure/array-chunking
    // closures it dispatches through.
    G1ParScanThreadState            pss(_g1h, i);
    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);

    pss.set_evac_closure(&scan_evac_cl);
    pss.set_evac_failure_closure(&evac_failure_cl);
    pss.set_partial_scan_closure(&partial_scan_cl);

    // Non-marking variants of the root/perm/RS closures...
    G1ParScanExtRootClosure  only_scan_root_cl(_g1h, &pss);
    G1ParScanPermClosure     only_scan_perm_cl(_g1h, &pss);
    G1ParScanHeapRSClosure   only_scan_heap_rs_cl(_g1h, &pss);
    G1ParPushHeapRSClosure   push_heap_rs_cl(_g1h, &pss);

    // ...and the scan-and-mark variants used during an initial-mark pause.
    G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
    G1ParScanAndMarkPermClosure    scan_mark_perm_cl(_g1h, &pss);
    G1ParScanAndMarkHeapRSClosure  scan_mark_heap_rs_cl(_g1h, &pss);

    OopsInHeapRegionClosure        *scan_root_cl;
    OopsInHeapRegionClosure        *scan_perm_cl;

    // Initial-mark pauses must also mark the objects reachable from roots.
    if (_g1h->g1_policy()->during_initial_mark_pause()) {
      scan_root_cl = &scan_mark_root_cl;
      scan_perm_cl = &scan_mark_perm_cl;
    } else {
      scan_root_cl = &only_scan_root_cl;
      scan_perm_cl = &only_scan_perm_cl;
    }

    pss.start_strong_roots();
    _g1h->g1_process_strong_roots(/* not collecting perm */ false,
                                  SharedHeap::SO_AllClasses,
                                  scan_root_cl,
                                  &push_heap_rs_cl,
                                  scan_perm_cl,
                                  i);
    pss.end_strong_roots();

    // Drain this worker's queue (stealing from others as needed) until the
    // terminator decides all workers are done; record copy/termination times.
    {
      double start = os::elapsedTime();
      G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
      evac.do_void();
      double elapsed_ms = (os::elapsedTime()-start)*1000.0;
      double term_ms = pss.term_time()*1000.0;
      _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
      _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
    }
    _g1h->g1_policy()->record_thread_age_table(pss.age_table());
    _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

    // Clean up any par-expanded rem sets.
    HeapRegionRemSet::par_cleanup();

    if (ParallelGCVerbose) {
      MutexLocker x(stats_lock());
      pss.print_termination_stats(i);
    }

    // All scan entries must have been processed by now.
    assert(pss.refs()->is_empty(), "should be empty");
    double end_time_ms = os::elapsedTime() * 1000.0;
    _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
  }
};
4706 | |
4707 // *** Common G1 Evacuation Stuff | |
4708 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4709 // This method is run in a GC worker. |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4710 |
342 | 4711 void |
4712 G1CollectedHeap:: | |
4713 g1_process_strong_roots(bool collecting_perm_gen, | |
4714 SharedHeap::ScanningOption so, | |
4715 OopClosure* scan_non_heap_roots, | |
4716 OopsInHeapRegionClosure* scan_rs, | |
4717 OopsInGenClosure* scan_perm, | |
4718 int worker_i) { | |
4719 // First scan the strong roots, including the perm gen. | |
4720 double ext_roots_start = os::elapsedTime(); | |
4721 double closure_app_time_sec = 0.0; | |
4722 | |
4723 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4724 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4725 buf_scan_perm.set_generation(perm_gen()); | |
4726 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4727 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4728 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4729 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4730 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4731 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4732 collecting_perm_gen, so, |
342 | 4733 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4734 &eager_scan_code_roots, |
342 | 4735 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4736 |
342 | 4737 // Finish up any enqueued closure apps. |
4738 buf_scan_non_heap_roots.done(); | |
4739 buf_scan_perm.done(); | |
4740 double ext_roots_end = os::elapsedTime(); | |
4741 g1_policy()->reset_obj_copy_time(worker_i); | |
4742 double obj_copy_time_sec = | |
4743 buf_scan_non_heap_roots.closure_app_seconds() + | |
4744 buf_scan_perm.closure_app_seconds(); | |
4745 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4746 double ext_root_time_ms = | |
4747 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4748 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4749 | |
4750 // Scan strong roots in mark stack. | |
4751 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4752 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4753 } | |
4754 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4755 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4756 | |
4757 // XXX What should this be doing in the parallel case? | |
4758 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4759 // Now scan the complement of the collection set. | |
4760 if (scan_rs != NULL) { | |
4761 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4762 } | |
4763 // Finish with the ref_processor roots. | |
4764 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4765 // We need to treat the discovered reference lists as roots and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4766 // keep entries (which are added by the marking threads) on them |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4767 // live until they can be processed at the end of marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4768 ref_processor()->weak_oops_do(scan_non_heap_roots); |
342 | 4769 ref_processor()->oops_do(scan_non_heap_roots); |
4770 } | |
4771 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4772 _process_strong_tasks->all_tasks_completed(); | |
4773 } | |
4774 | |
4775 void | |
4776 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4777 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4778 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4779 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4780 } |
4781 | |
4782 | |
// Region closure that calls save_marks() on every region it visits.
// Used by G1CollectedHeap::save_marks() in the non-parallel case.
class SaveMarksClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->save_marks();
    return false;  // never abort the iteration
  }
};
4790 | |
// Saves marks on all heap regions (serial case only) and on the perm gen.
void G1CollectedHeap::save_marks() {
  // In the parallel case the regions' marks are handled elsewhere, so only
  // iterate over them when not using parallel GC threads.
  if (!CollectedHeap::use_parallel_gc_threads()) {
    SaveMarksClosure sm;
    heap_region_iterate(&sm);
  }
  // We do this even in the parallel case
  perm_gen()->save_marks();
}
4799 | |
// Evacuates the current collection set: sets up the remembered-set and
// refinement machinery, runs G1ParTask on the worker threads (or serially),
// processes JNI weak roots, and then performs all post-evacuation cleanup
// (alloc-region retirement, evac-failure handling, deferred RS updates).
void G1CollectedHeap::evacuate_collection_set() {
  set_evacuation_failed(false);

  g1_rem_set()->prepare_for_oops_into_collection_set_do();
  // Disable the refinement hot-card cache for the duration of the pause.
  concurrent_g1_refine()->set_use_cache(false);
  concurrent_g1_refine()->clear_hot_cache_claimed_index();

  int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
  set_par_threads(n_workers);
  G1ParTask g1_par_task(this, n_workers, _task_queues);

  init_for_evac_failure(NULL);

  rem_set()->prepare_for_younger_refs_iterate(true);

  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  double start_par = os::elapsedTime();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    // The individual threads will set their evac-failure closures.
    StrongRootsScope srs(this);
    if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
    workers()->run_task(&g1_par_task);
  } else {
    // Serial fallback: run the task body directly as worker 0.
    StrongRootsScope srs(this);
    g1_par_task.work(0);
  }

  double par_time = (os::elapsedTime() - start_par) * 1000.0;
  g1_policy()->record_par_time(par_time);
  set_par_threads(0);
  // Is this the right thing to do here? We don't save marks
  // on individual heap regions when we allocate from
  // them in parallel, so this seems like the correct place for this.
  retire_all_alloc_regions();

  // Weak root processing.
  // Note: when JSR 292 is enabled and code blobs can contain
  // non-perm oops then we will need to process the code blobs
  // here too.
  {
    G1IsAliveClosure is_alive(this);
    G1KeepAliveClosure keep_alive(this);
    JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  }
  release_gc_alloc_regions(false /* totally */);
  g1_rem_set()->cleanup_after_oops_into_collection_set_do();

  // Re-enable the refinement hot-card cache now that the pause is done.
  concurrent_g1_refine()->clear_hot_cache();
  concurrent_g1_refine()->set_use_cache(true);

  finalize_for_evac_failure();

  // Must do this before removing self-forwarding pointers, which clears
  // the per-region evac-failure flags.
  concurrent_mark()->complete_marking_in_collection_set();

  if (evacuation_failed()) {
    remove_self_forwarding_pointers();
    // Report the to-space exhaustion in the GC log.
    if (PrintGCDetails) {
      gclog_or_tty->print(" (to-space overflow)");
    } else if (PrintGC) {
      gclog_or_tty->print("--");
    }
  }

  if (G1DeferredRSUpdate) {
    // Re-dirty the card-table entries logged during the pause and hand the
    // buffers over to the shared dirty-card queue set for later refinement.
    RedirtyLoggedCardTableEntryFastClosure redirty;
    dirty_card_queue_set().set_closure(&redirty);
    dirty_card_queue_set().apply_closure_to_all_completed_buffers();

    DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
    dcq.merge_bufferlists(&dirty_card_queue_set());
    assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
4876 | |
// Frees 'hr' if marking found it fully dead (used but with no live bytes,
// and not young); otherwise schedules its remembered set for cleanup.
// Freed space is accumulated into *pre_used and the region is put on
// 'free_list' (humongous regions also go through 'humongous_proxy_set').
// 'par' indicates whether this is called from a parallel context.
void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
                                           size_t* pre_used,
                                           FreeRegionList* free_list,
                                           HumongousRegionSet* humongous_proxy_set,
                                           HRRSCleanupTask* hrrs_cleanup_task,
                                           bool par) {
  if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
    if (hr->isHumongous()) {
      // Only the first region of a humongous object is presented here;
      // free_humongous_region() walks and frees the continuation regions.
      assert(hr->startsHumongous(), "we should only see starts humongous");
      free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
    } else {
      free_region(hr, pre_used, free_list, par);
    }
  } else {
    // Region stays alive: just queue its remembered set for cleanup work.
    hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
  }
}
4894 | |
// Frees a single non-humongous, non-empty region: accounts its used bytes
// into *pre_used, clears it, and prepends it to 'free_list'.
// 'par' is forwarded to hr_clear() for parallel-context handling.
void G1CollectedHeap::free_region(HeapRegion* hr,
                                  size_t* pre_used,
                                  FreeRegionList* free_list,
                                  bool par) {
  assert(!hr->isHumongous(), "this is only for non-humongous regions");
  assert(!hr->is_empty(), "the region should not be empty");
  assert(free_list != NULL, "pre-condition");

  *pre_used += hr->used();
  hr->hr_clear(par, true /* clear_space */);
  // Newly-reclaimed regions go to the head of the free list.
  free_list->add_as_head(hr);
}
4907 | |
4908 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, | |
4909 size_t* pre_used, | |
4910 FreeRegionList* free_list, | |
4911 HumongousRegionSet* humongous_proxy_set, | |
4912 bool par) { | |
4913 assert(hr->startsHumongous(), "this is only for starts humongous regions"); | |
4914 assert(free_list != NULL, "pre-condition"); | |
4915 assert(humongous_proxy_set != NULL, "pre-condition"); | |
4916 | |
4917 size_t hr_used = hr->used(); | |
4918 size_t hr_capacity = hr->capacity(); | |
4919 size_t hr_pre_used = 0; | |
4920 _humongous_set.remove_with_proxy(hr, humongous_proxy_set); | |
4921 hr->set_notHumongous(); | |
4922 free_region(hr, &hr_pre_used, free_list, par); | |
4923 | |
3766 | 4924 size_t i = hr->hrs_index() + 1; |
2152 | 4925 size_t num = 1; |
3766 | 4926 while (i < n_regions()) { |
4927 HeapRegion* curr_hr = region_at(i); | |
2152 | 4928 if (!curr_hr->continuesHumongous()) { |
4929 break; | |
4930 } | |
4931 curr_hr->set_notHumongous(); | |
4932 free_region(curr_hr, &hr_pre_used, free_list, par); | |
4933 num += 1; | |
4934 i += 1; | |
4935 } | |
4936 assert(hr_pre_used == hr_used, | |
4937 err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" " | |
4938 "should be the same", hr_pre_used, hr_used)); | |
4939 *pre_used += hr_pre_used; | |
4940 } | |
4941 | |
// Applies the results of a (possibly parallel) region-freeing phase to the
// shared heap state: subtracts pre_used from _summary_bytes_used, splices
// 'free_list' onto the master free list, and merges the humongous proxy set
// back into the master humongous set. Each update takes its own lock only
// when there is something to do; 'par' selects the rare-event lock for the
// byte accounting.
void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
                                       FreeRegionList* free_list,
                                       HumongousRegionSet* humongous_proxy_set,
                                       bool par) {
  if (pre_used > 0) {
    // Only parallel callers need the lock; serial callers pass NULL.
    Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
    MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
    assert(_summary_bytes_used >= pre_used,
           err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
                   "should be >= pre_used: "SIZE_FORMAT,
                   _summary_bytes_used, pre_used));
    _summary_bytes_used -= pre_used;
  }
  if (free_list != NULL && !free_list->is_empty()) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    // Freed regions are prepended to the master free list.
    _free_list.add_as_head(free_list);
  }
  if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
    MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
    _humongous_set.update_from_proxy(humongous_proxy_set);
  }
}
4964 | |
4965 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4966 while (list != NULL) { | |
4967 guarantee( list->is_young(), "invariant" ); | |
4968 | |
4969 HeapWord* bottom = list->bottom(); | |
4970 HeapWord* end = list->end(); | |
4971 MemRegion mr(bottom, end); | |
4972 ct_bs->dirty(mr); | |
4973 | |
4974 list = list->get_next_young_region(); | |
4975 } | |
4976 } | |
4977 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4978 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4979 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4980 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4981 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4982 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4983 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4984 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4985 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4986 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4987 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4988 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4989 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4990 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4991 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4992 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4993 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4994 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4995 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4996 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4997 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4998 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4999 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5000 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5001 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5002 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5003 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5004 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5005 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5006 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5007 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5008 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5009 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5010 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5011 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5012 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5013 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5014 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5015 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5016 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5017 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5018 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5019 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5020 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5021 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5022 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5023 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5024 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5025 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5026 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5027 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
// Debug-only (non-PRODUCT) region closure that verifies the card-table
// cleanup: survivor regions must have dirty cards, all other regions must
// have clean cards.
class G1VerifyCardTableCleanup: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ct_bs;
public:
  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
    : _g1h(g1h), _ct_bs(ct_bs) { }
  virtual bool doHeapRegion(HeapRegion* r) {
    if (r->is_survivor()) {
      _g1h->verify_dirty_region(r);
    } else {
      _g1h->verify_not_dirty_region(r);
    }
    return false;  // never abort the iteration
  }
};
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5043 |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
// Debug-only check that no card covering 'hr' is dirty.
void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
  // All of the region should be clean.
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
  MemRegion mr(hr->bottom(), hr->end());
  ct_bs->verify_not_dirty_region(mr);
}
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5050 |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5051 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) { |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5052 // We cannot guarantee that [bottom(),end()] is dirty. Threads |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5053 // dirty allocated blocks as they allocate them. The thread that |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5054 // retires each region and replaces it with a new one will do a |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5055 // maximal allocation to fill in [pre_dummy_top(),end()] but will |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5056 // not dirty that area (one less thing to have to do while holding |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5057 // a lock). So we can only verify that [bottom(),pre_dummy_top()] |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5058 // is dirty. |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5059 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5060 MemRegion mr(hr->bottom(), hr->pre_dummy_top()); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5061 ct_bs->verify_dirty_region(mr); |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5062 } |
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5063 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5064 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5065 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5066 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5067 verify_dirty_region(hr); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5068 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5069 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5070 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5071 void G1CollectedHeap::verify_dirty_young_regions() { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5072 verify_dirty_young_list(_young_list->first_region()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5073 verify_dirty_young_list(_young_list->first_survivor_region()); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5074 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5075 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5076 |
342 | 5077 void G1CollectedHeap::cleanUpCardTable() { |
5078 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
5079 double start = os::elapsedTime(); | |
5080 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5081 // Iterate over the dirty cards region list. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5082 G1ParCleanupCTTask cleanup_task(ct_bs, this, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5083 _young_list->first_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5084 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5085 if (ParallelGCThreads > 0) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5086 set_par_threads(workers()->total_workers()); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5087 workers()->run_task(&cleanup_task); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5088 set_par_threads(0); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5089 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5090 while (_dirty_cards_region_list) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5091 HeapRegion* r = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5092 cleanup_task.clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5093 _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5094 if (_dirty_cards_region_list == r) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5095 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5096 _dirty_cards_region_list = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5097 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5098 r->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5099 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5100 // now, redirty the cards of the survivor regions |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5101 // (it seemed faster to do it this way, instead of iterating over |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5102 // all regions and then clearing / dirtying as appropriate) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5103 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5104 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5105 |
342 | 5106 double elapsed = os::elapsedTime() - start; |
5107 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5108 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5109 if (G1VerifyCTCleanup || VerifyAfterGC) { |
3317
063382f9b575
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
tonyp
parents:
3293
diff
changeset
|
5110 G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs); |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5111 heap_region_iterate(&cleanup_verifier); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5112 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5113 #endif |
342 | 5114 } |
5115 | |
5116 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
2152 | 5117 size_t pre_used = 0; |
5118 FreeRegionList local_free_list("Local List for CSet Freeing"); | |
5119 | |
342 | 5120 double young_time_ms = 0.0; |
5121 double non_young_time_ms = 0.0; | |
5122 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5123 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5124 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5125 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5126 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5127 |
342 | 5128 G1CollectorPolicy* policy = g1_policy(); |
5129 | |
5130 double start_sec = os::elapsedTime(); | |
5131 bool non_young = true; | |
5132 | |
5133 HeapRegion* cur = cs_head; | |
5134 int age_bound = -1; | |
5135 size_t rs_lengths = 0; | |
5136 | |
5137 while (cur != NULL) { | |
2361 | 5138 assert(!is_on_master_free_list(cur), "sanity"); |
2152 | 5139 |
342 | 5140 if (non_young) { |
5141 if (cur->is_young()) { | |
5142 double end_sec = os::elapsedTime(); | |
5143 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5144 non_young_time_ms += elapsed_ms; | |
5145 | |
5146 start_sec = os::elapsedTime(); | |
5147 non_young = false; | |
5148 } | |
5149 } else { | |
2152 | 5150 double end_sec = os::elapsedTime(); |
5151 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5152 young_time_ms += elapsed_ms; | |
5153 | |
5154 start_sec = os::elapsedTime(); | |
5155 non_young = true; | |
342 | 5156 } |
5157 | |
5158 rs_lengths += cur->rem_set()->occupied(); | |
5159 | |
5160 HeapRegion* next = cur->next_in_collection_set(); | |
5161 assert(cur->in_collection_set(), "bad CS"); | |
5162 cur->set_next_in_collection_set(NULL); | |
5163 cur->set_in_collection_set(false); | |
5164 | |
5165 if (cur->is_young()) { | |
5166 int index = cur->young_index_in_cset(); | |
5167 guarantee( index != -1, "invariant" ); | |
5168 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
5169 size_t words_survived = _surviving_young_words[index]; | |
5170 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5171 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5172 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5173 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5174 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5175 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5176 cur->set_next_young_region(NULL); |
342 | 5177 } else { |
5178 int index = cur->young_index_in_cset(); | |
5179 guarantee( index == -1, "invariant" ); | |
5180 } | |
5181 | |
5182 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
5183 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
5184 "invariant" ); | |
5185 | |
5186 if (!cur->evacuation_failed()) { | |
5187 // And the region is empty. | |
2152 | 5188 assert(!cur->is_empty(), "Should not have empty regions in a CS."); |
5189 free_region(cur, &pre_used, &local_free_list, false /* par */); | |
342 | 5190 } else { |
5191 cur->uninstall_surv_rate_group(); | |
5192 if (cur->is_young()) | |
5193 cur->set_young_index_in_cset(-1); | |
5194 cur->set_not_young(); | |
5195 cur->set_evacuation_failed(false); | |
5196 } | |
5197 cur = next; | |
5198 } | |
5199 | |
5200 policy->record_max_rs_lengths(rs_lengths); | |
5201 policy->cset_regions_freed(); | |
5202 | |
5203 double end_sec = os::elapsedTime(); | |
5204 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5205 if (non_young) | |
5206 non_young_time_ms += elapsed_ms; | |
5207 else | |
5208 young_time_ms += elapsed_ms; | |
5209 | |
2152 | 5210 update_sets_after_freeing_regions(pre_used, &local_free_list, |
5211 NULL /* humongous_proxy_set */, | |
5212 false /* par */); | |
342 | 5213 policy->record_young_free_cset_time_ms(young_time_ms); |
5214 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
5215 } | |
5216 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5217 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5218 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5219 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5220 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5221 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5222 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5223 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5224 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5225 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5226 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5227 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5228 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5229 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5230 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5231 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5232 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5233 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5234 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5235 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5236 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5237 |
2152 | 5238 void G1CollectedHeap::set_free_regions_coming() { |
5239 if (G1ConcRegionFreeingVerbose) { | |
5240 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : " | |
5241 "setting free regions coming"); | |
5242 } | |
5243 | |
5244 assert(!free_regions_coming(), "pre-condition"); | |
5245 _free_regions_coming = true; | |
342 | 5246 } |
5247 | |
2152 | 5248 void G1CollectedHeap::reset_free_regions_coming() { |
5249 { | |
5250 assert(free_regions_coming(), "pre-condition"); | |
5251 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | |
5252 _free_regions_coming = false; | |
5253 SecondaryFreeList_lock->notify_all(); | |
5254 } | |
5255 | |
5256 if (G1ConcRegionFreeingVerbose) { | |
5257 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : " | |
5258 "reset free regions coming"); | |
342 | 5259 } |
5260 } | |
5261 | |
2152 | 5262 void G1CollectedHeap::wait_while_free_regions_coming() { |
5263 // Most of the time we won't have to wait, so let's do a quick test | |
5264 // first before we take the lock. | |
5265 if (!free_regions_coming()) { | |
5266 return; | |
5267 } | |
5268 | |
5269 if (G1ConcRegionFreeingVerbose) { | |
5270 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " | |
5271 "waiting for free regions"); | |
342 | 5272 } |
5273 | |
5274 { | |
2152 | 5275 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
5276 while (free_regions_coming()) { | |
5277 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); | |
342 | 5278 } |
2152 | 5279 } |
5280 | |
5281 if (G1ConcRegionFreeingVerbose) { | |
5282 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " | |
5283 "done waiting for free regions"); | |
5284 } | |
342 | 5285 } |
5286 | |
5287 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
5288 assert(heap_lock_held_for_gc(), | |
5289 "the heap lock should already be held by or for this thread"); | |
5290 _young_list->push_region(hr); | |
5291 g1_policy()->set_region_short_lived(hr); | |
5292 } | |
5293 | |
5294 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5295 private: | |
5296 bool _success; | |
5297 public: | |
5298 NoYoungRegionsClosure() : _success(true) { } | |
5299 bool doHeapRegion(HeapRegion* r) { | |
5300 if (r->is_young()) { | |
5301 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5302 r->bottom(), r->end()); | |
5303 _success = false; | |
5304 } | |
5305 return false; | |
5306 } | |
5307 bool success() { return _success; } | |
5308 }; | |
5309 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5310 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5311 bool ret = _young_list->check_list_empty(check_sample); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5312 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5313 if (check_heap) { |
342 | 5314 NoYoungRegionsClosure closure; |
5315 heap_region_iterate(&closure); | |
5316 ret = ret && closure.success(); | |
5317 } | |
5318 | |
5319 return ret; | |
5320 } | |
5321 | |
5322 void G1CollectedHeap::empty_young_list() { | |
5323 assert(heap_lock_held_for_gc(), | |
5324 "the heap lock should already be held by or for this thread"); | |
5325 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
5326 | |
5327 _young_list->empty_list(); | |
5328 } | |
5329 | |
5330 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5331 bool no_allocs = true; | |
5332 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5333 HeapRegion* r = _gc_alloc_regions[ap]; | |
5334 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5335 } | |
5336 return no_allocs; | |
5337 } | |
5338 | |
545 | 5339 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5340 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5341 HeapRegion* r = _gc_alloc_regions[ap]; | |
5342 if (r != NULL) { | |
5343 // Check for aliases. | |
5344 bool has_processed_alias = false; | |
5345 for (int i = 0; i < ap; ++i) { | |
5346 if (_gc_alloc_regions[i] == r) { | |
5347 has_processed_alias = true; | |
5348 break; | |
5349 } | |
5350 } | |
5351 if (!has_processed_alias) { | |
545 | 5352 retire_alloc_region(r, false /* par */); |
342 | 5353 } |
5354 } | |
5355 } | |
5356 } | |
5357 | |
5358 // Done at the start of full GC. | |
5359 void G1CollectedHeap::tear_down_region_lists() { | |
2152 | 5360 _free_list.remove_all(); |
342 | 5361 } |
5362 | |
5363 class RegionResetter: public HeapRegionClosure { | |
2152 | 5364 G1CollectedHeap* _g1h; |
5365 FreeRegionList _local_free_list; | |
5366 | |
342 | 5367 public: |
2152 | 5368 RegionResetter() : _g1h(G1CollectedHeap::heap()), |
5369 _local_free_list("Local Free List for RegionResetter") { } | |
5370 | |
342 | 5371 bool doHeapRegion(HeapRegion* r) { |
5372 if (r->continuesHumongous()) return false; | |
5373 if (r->top() > r->bottom()) { | |
5374 if (r->top() < r->end()) { | |
5375 Copy::fill_to_words(r->top(), | |
5376 pointer_delta(r->end(), r->top())); | |
5377 } | |
5378 } else { | |
5379 assert(r->is_empty(), "tautology"); | |
2152 | 5380 _local_free_list.add_as_tail(r); |
342 | 5381 } |
5382 return false; | |
5383 } | |
5384 | |
2152 | 5385 void update_free_lists() { |
5386 _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL, | |
5387 false /* par */); | |
5388 } | |
342 | 5389 }; |
5390 | |
5391 // Done at the end of full GC. | |
5392 void G1CollectedHeap::rebuild_region_lists() { | |
5393 // This needs to go at the end of the full GC. | |
5394 RegionResetter rs; | |
5395 heap_region_iterate(&rs); | |
2152 | 5396 rs.update_free_lists(); |
342 | 5397 } |
5398 | |
5399 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
5400 _refine_cte_cl->set_concurrent(concurrent); | |
5401 } | |
5402 | |
5403 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5404 HeapRegion* hr = heap_region_containing(p); | |
5405 if (hr == NULL) { | |
5406 return is_in_permanent(p); | |
5407 } else { | |
5408 return hr->is_in(p); | |
5409 } | |
5410 } | |
2152 | 5411 |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5412 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5413 bool force) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5414 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5415 assert(!force || g1_policy()->can_expand_young_list(), |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5416 "if force is true we should be able to expand the young list"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5417 if (force || !g1_policy()->is_young_list_full()) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5418 HeapRegion* new_alloc_region = new_region(word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5419 false /* do_expand */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5420 if (new_alloc_region != NULL) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5421 g1_policy()->update_region_num(true /* next_is_young */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5422 set_region_short_lived_locked(new_alloc_region); |
3289
b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
jmasa
parents:
3285
diff
changeset
|
5423 g1mm()->update_eden_counters(); |
2433
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5424 return new_alloc_region; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5425 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5426 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5427 return NULL; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5428 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5429 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5430 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5431 size_t allocated_bytes) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5432 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5433 assert(alloc_region->is_young(), "all mutator alloc regions should be young"); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5434 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5435 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5436 _summary_bytes_used += allocated_bytes; |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5437 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5438 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5439 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5440 bool force) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5441 return _g1h->new_mutator_alloc_region(word_size, force); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5442 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5443 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5444 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5445 size_t allocated_bytes) { |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5446 _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5447 } |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5448 |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5449 // Heap region set verification |
abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
tonyp
parents:
2432
diff
changeset
|
5450 |
2152 | 5451 class VerifyRegionListsClosure : public HeapRegionClosure { |
5452 private: | |
5453 HumongousRegionSet* _humongous_set; | |
5454 FreeRegionList* _free_list; | |
5455 size_t _region_count; | |
5456 | |
5457 public: | |
5458 VerifyRegionListsClosure(HumongousRegionSet* humongous_set, | |
5459 FreeRegionList* free_list) : | |
5460 _humongous_set(humongous_set), _free_list(free_list), | |
5461 _region_count(0) { } | |
5462 | |
5463 size_t region_count() { return _region_count; } | |
5464 | |
5465 bool doHeapRegion(HeapRegion* hr) { | |
5466 _region_count += 1; | |
5467 | |
5468 if (hr->continuesHumongous()) { | |
5469 return false; | |
5470 } | |
5471 | |
5472 if (hr->is_young()) { | |
5473 // TODO | |
5474 } else if (hr->startsHumongous()) { | |
5475 _humongous_set->verify_next_region(hr); | |
5476 } else if (hr->is_empty()) { | |
5477 _free_list->verify_next_region(hr); | |
5478 } | |
5479 return false; | |
5480 } | |
5481 }; | |
5482 | |
3766 | 5483 HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index, |
5484 HeapWord* bottom) { | |
5485 HeapWord* end = bottom + HeapRegion::GrainWords; | |
5486 MemRegion mr(bottom, end); | |
5487 assert(_g1_reserved.contains(mr), "invariant"); | |
5488 // This might return NULL if the allocation fails | |
5489 return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */); | |
5490 } | |
5491 | |
2152 | 5492 void G1CollectedHeap::verify_region_sets() { |
5493 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); | |
5494 | |
5495 // First, check the explicit lists. | |
5496 _free_list.verify(); | |
5497 { | |
5498 // Given that a concurrent operation might be adding regions to | |
5499 // the secondary free list we have to take the lock before | |
5500 // verifying it. | |
5501 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | |
5502 _secondary_free_list.verify(); | |
5503 } | |
5504 _humongous_set.verify(); | |
5505 | |
5506 // If a concurrent region freeing operation is in progress it will | |
5507 // be difficult to correctly attributed any free regions we come | |
5508 // across to the correct free list given that they might belong to | |
5509 // one of several (free_list, secondary_free_list, any local lists, | |
5510 // etc.). So, if that's the case we will skip the rest of the | |
5511 // verification operation. Alternatively, waiting for the concurrent | |
5512 // operation to complete will have a non-trivial effect on the GC's | |
5513 // operation (no concurrent operation will last longer than the | |
5514 // interval between two calls to verification) and it might hide | |
5515 // any issues that we would like to catch during testing. | |
5516 if (free_regions_coming()) { | |
5517 return; | |
5518 } | |
5519 | |
2361 | 5520 // Make sure we append the secondary_free_list on the free_list so |
5521 // that all free regions we will come across can be safely | |
5522 // attributed to the free_list. | |
5523 append_secondary_free_list_if_not_empty_with_lock(); | |
2152 | 5524 |
5525 // Finally, make sure that the region accounting in the lists is | |
5526 // consistent with what we see in the heap. | |
5527 _humongous_set.verify_start(); | |
5528 _free_list.verify_start(); | |
5529 | |
5530 VerifyRegionListsClosure cl(&_humongous_set, &_free_list); | |
5531 heap_region_iterate(&cl); | |
5532 | |
5533 _humongous_set.verify_end(); | |
5534 _free_list.verify_end(); | |
342 | 5535 } |