Mercurial > hg > graal-compiler
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 2152:0fa27f37d4d4
6977804: G1: remove the zero-filling thread
Summary: This changeset removes the zero-filling thread from G1 and collapses the two free region lists we had before (the "free" and "unclean" lists) into one. The new free list uses the new heap region sets / lists abstractions that we'll ultimately use to keep track of all regions in the heap. A heap region set was also introduced for the humongous regions. Finally, this change increases the concurrency between the thread that completes freeing regions (after a cleanup pause) and the rest of the system (before, we'd have to wait for said thread to complete before allocating a new region). The changeset also includes a lot of refactoring and code simplification.
Reviewed-by: jcoomes, johnc
author | tonyp |
---|---|
date | Wed, 19 Jan 2011 19:30:42 -0500 |
parents | ffd725ff6943 |
children | 97ba643ea3ed |
rev | line source |
---|---|
342 | 1 /* |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "code/icBuffer.hpp" | |
27 #include "gc_implementation/g1/bufferingOopClosure.hpp" | |
28 #include "gc_implementation/g1/concurrentG1Refine.hpp" | |
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" | |
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" | |
31 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" | |
32 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | |
33 #include "gc_implementation/g1/g1MarkSweep.hpp" | |
34 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | |
35 #include "gc_implementation/g1/g1RemSet.inline.hpp" | |
36 #include "gc_implementation/g1/heapRegionRemSet.hpp" | |
37 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | |
38 #include "gc_implementation/g1/vm_operations_g1.hpp" | |
39 #include "gc_implementation/shared/isGCActiveMark.hpp" | |
40 #include "memory/gcLocker.inline.hpp" | |
41 #include "memory/genOopClosures.inline.hpp" | |
42 #include "memory/generationSpec.hpp" | |
43 #include "oops/oop.inline.hpp" | |
44 #include "oops/oop.pcgc.inline.hpp" | |
45 #include "runtime/aprofiler.hpp" | |
46 #include "runtime/vmThread.hpp" | |
342 | 47 |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
48 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
49 |
342 | 50 // turn it on so that the contents of the young list (scan-only / |
51 // to-be-collected) are printed at "strategic" points before / during | |
52 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
53 #define YOUNG_LIST_VERBOSE 0 |
342 | 54 // CURRENT STATUS |
55 // This file is under construction. Search for "FIXME". | |
56 | |
57 // INVARIANTS/NOTES | |
58 // | |
59 // All allocation activity covered by the G1CollectedHeap interface is | |
1973 | 60 // serialized by acquiring the HeapLock. This happens in mem_allocate |
61 // and allocate_new_tlab, which are the "entry" points to the | |
62 // allocation code from the rest of the JVM. (Note that this does not | |
63 // apply to TLAB allocation, which is not part of this interface: it | |
64 // is done by clients of this interface.) | |
342 | 65 |
66 // Local to this file. | |
67 | |
68 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
69 SuspendibleThreadSet* _sts; | |
70 G1RemSet* _g1rs; | |
71 ConcurrentG1Refine* _cg1r; | |
72 bool _concurrent; | |
73 public: | |
74 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
75 G1RemSet* g1rs, | |
76 ConcurrentG1Refine* cg1r) : | |
77 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
78 {} | |
79 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
1705 | 80 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); |
81 // This path is executed by the concurrent refine or mutator threads, | |
82 // concurrently, and so we do not care if card_ptr contains references | |
83 // that point into the collection set. | |
84 assert(!oops_into_cset, "should be"); | |
85 | |
342 | 86 if (_concurrent && _sts->should_yield()) { |
87 // Caller will actually yield. | |
88 return false; | |
89 } | |
90 // Otherwise, we finished successfully; return true. | |
91 return true; | |
92 } | |
93 void set_concurrent(bool b) { _concurrent = b; } | |
94 }; | |
95 | |
96 | |
97 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
98 int _calls; | |
99 G1CollectedHeap* _g1h; | |
100 CardTableModRefBS* _ctbs; | |
101 int _histo[256]; | |
102 public: | |
103 ClearLoggedCardTableEntryClosure() : | |
104 _calls(0) | |
105 { | |
106 _g1h = G1CollectedHeap::heap(); | |
107 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
108 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
109 } | |
110 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
111 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
112 _calls++; | |
113 unsigned char* ujb = (unsigned char*)card_ptr; | |
114 int ind = (int)(*ujb); | |
115 _histo[ind]++; | |
116 *card_ptr = -1; | |
117 } | |
118 return true; | |
119 } | |
120 int calls() { return _calls; } | |
121 void print_histo() { | |
122 gclog_or_tty->print_cr("Card table value histogram:"); | |
123 for (int i = 0; i < 256; i++) { | |
124 if (_histo[i] != 0) { | |
125 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
126 } | |
127 } | |
128 } | |
129 }; | |
130 | |
131 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
132 int _calls; | |
133 G1CollectedHeap* _g1h; | |
134 CardTableModRefBS* _ctbs; | |
135 public: | |
136 RedirtyLoggedCardTableEntryClosure() : | |
137 _calls(0) | |
138 { | |
139 _g1h = G1CollectedHeap::heap(); | |
140 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
141 } | |
142 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
143 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
144 _calls++; | |
145 *card_ptr = 0; | |
146 } | |
147 return true; | |
148 } | |
149 int calls() { return _calls; } | |
150 }; | |
151 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
// Fast variant of RedirtyLoggedCardTableEntryClosure: unconditionally
// marks every card it is applied to as dirty — no range check and no
// call counting.
class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    return true;
  }
};
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
159 |
// Constructs an empty young list for the given heap: both the eden
// chain (_head) and the survivor chain start out empty.
YoungList::YoungList(G1CollectedHeap* g1h)
  : _g1h(g1h), _head(NULL),
    _length(0),
    _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
{
  guarantee( check_list_empty(false), "just making sure..." );
}
168 | |
169 void YoungList::push_region(HeapRegion *hr) { | |
170 assert(!hr->is_young(), "should not already be young"); | |
171 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
172 | |
173 hr->set_next_young_region(_head); | |
174 _head = hr; | |
175 | |
176 hr->set_young(); | |
177 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
178 ++_length; | |
179 } | |
180 | |
181 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 182 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 183 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
184 | |
185 hr->set_next_young_region(_survivor_head); | |
186 if (_survivor_head == NULL) { | |
545 | 187 _survivor_tail = hr; |
342 | 188 } |
189 _survivor_head = hr; | |
190 | |
191 ++_survivor_length; | |
192 } | |
193 | |
194 void YoungList::empty_list(HeapRegion* list) { | |
195 while (list != NULL) { | |
196 HeapRegion* next = list->get_next_young_region(); | |
197 list->set_next_young_region(NULL); | |
198 list->uninstall_surv_rate_group(); | |
199 list->set_not_young(); | |
200 list = next; | |
201 } | |
202 } | |
203 | |
204 void YoungList::empty_list() { | |
205 assert(check_list_well_formed(), "young list should be well formed"); | |
206 | |
207 empty_list(_head); | |
208 _head = NULL; | |
209 _length = 0; | |
210 | |
211 empty_list(_survivor_head); | |
212 _survivor_head = NULL; | |
545 | 213 _survivor_tail = NULL; |
342 | 214 _survivor_length = 0; |
215 | |
216 _last_sampled_rs_lengths = 0; | |
217 | |
218 assert(check_list_empty(false), "just making sure..."); | |
219 } | |
220 | |
// Verification: walks the eden chain checking that every region is
// tagged young and that the chain's length matches _length.  Logs a
// diagnostic for each violation; returns true iff all checks pass.
bool YoungList::check_list_well_formed() {
  bool ret = true;

  size_t length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
    if (!curr->is_young()) {
      gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (y: %d, surv: %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_survivor());
      ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  // The walked length must agree with the cached length.
  ret = ret && (length == _length);

  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
    gclog_or_tty->print_cr("### list has %d entries, _length is %d",
                           length, _length);
  }

  return ret;
}
249 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
250 bool YoungList::check_list_empty(bool check_sample) { |
342 | 251 bool ret = true; |
252 | |
253 if (_length != 0) { | |
254 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
255 _length); | |
256 ret = false; | |
257 } | |
258 if (check_sample && _last_sampled_rs_lengths != 0) { | |
259 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
260 ret = false; | |
261 } | |
262 if (_head != NULL) { | |
263 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
264 ret = false; | |
265 } | |
266 if (!ret) { | |
267 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
268 } | |
269 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
270 return ret; |
342 | 271 } |
272 | |
273 void | |
274 YoungList::rs_length_sampling_init() { | |
275 _sampled_rs_lengths = 0; | |
276 _curr = _head; | |
277 } | |
278 | |
279 bool | |
280 YoungList::rs_length_sampling_more() { | |
281 return _curr != NULL; | |
282 } | |
283 | |
// Samples the remembered-set length of the current region, folds it
// into the running total, and advances to the next region.  When the
// walk completes, publishes the total in _last_sampled_rs_lengths.
void
YoungList::rs_length_sampling_next() {
  assert( _curr != NULL, "invariant" );
  size_t rs_length = _curr->rem_set()->occupied();

  _sampled_rs_lengths += rs_length;

  // The current region may not yet have been added to the
  // incremental collection set (it gets added when it is
  // retired as the current allocation region).
  if (_curr->in_collection_set()) {
    // Update the collection set policy information for this region
    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
  }

  _curr = _curr->get_next_young_region();
  if (_curr == NULL) {
    // End of the walk: publish the accumulated sample.
    _last_sampled_rs_lengths = _sampled_rs_lengths;
    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
  }
}
305 | |
// After an evacuation pause: promotes the survivor chain to become the
// new eden chain.  Registers each survivor with the policy's survivor
// surv-rate group and adds it to the incremental collection set for
// the next pause.  The list itself must be (logically) empty on entry.
void
YoungList::reset_auxilary_lists() {
  guarantee( is_empty(), "young list should be empty" );
  assert(check_list_well_formed(), "young list should be well formed");

  // Add survivor regions to SurvRateGroup.
  _g1h->g1_policy()->note_start_adding_survivor_regions();
  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);

  for (HeapRegion* curr = _survivor_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    _g1h->g1_policy()->set_region_survivors(curr);

    // The region is a non-empty survivor so let's add it to
    // the incremental collection set for the next evacuation
    // pause.
    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
  }
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  // The survivor chain becomes the new eden chain.
  _head = _survivor_head;
  _length = _survivor_length;
  if (_survivor_head != NULL) {
    assert(_survivor_tail != NULL, "cause it shouldn't be");
    assert(_survivor_length > 0, "invariant");
    _survivor_tail->set_next_young_region(NULL);
  }

  // Don't clear the survivor list handles until the start of
  // the next evacuation pause - we need it in order to re-tag
  // the survivor regions from this evacuation pause as 'young'
  // at the start of the next.

  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

  assert(check_list_well_formed(), "young list should be well formed");
}
344 | |
// Debug printing: dumps the contents of both the eden ("YOUNG") and
// survivor chains — region bounds, top/TAMS pointers, age and tags.
void YoungList::print() {
  HeapRegion* lists[] = {_head, _survivor_head};
  const char* names[] = {"YOUNG", "SURVIVOR"};

  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
    HeapRegion *curr = lists[list];
    if (curr == NULL)
      gclog_or_tty->print_cr("  empty");
    while (curr != NULL) {
      gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                             "age: %4d, y: %d, surv: %d",
                             curr->bottom(), curr->end(),
                             curr->top(),
                             curr->prev_top_at_mark_start(),
                             curr->next_top_at_mark_start(),
                             curr->top_at_conc_mark_count(),
                             curr->age_in_surv_rate_group_cond(),
                             curr->is_young(),
                             curr->is_survivor());
      curr = curr->get_next_young_region();
    }
  }

  gclog_or_tty->print_cr("");
}
371 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
// Pushes a region onto the lock-free dirty cards region list, unless
// it is already on it.  A region's next-pointer convention: NULL means
// "not on the list"; a self-pointer means "last element".  The region
// first claims membership by cmpxchg-installing a self pointer, then
// cmpxchg-links itself in as the new list head.
void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
{
  // Claim the right to put the region on the dirty cards region list
  // by installing a self pointer.
  HeapRegion* next = hr->get_next_dirty_cards_region();
  if (next == NULL) {
    HeapRegion* res = (HeapRegion*)
      Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
                          NULL);
    if (res == NULL) {
      // We won the claim; now splice hr onto the head of the list.
      HeapRegion* head;
      do {
        // Put the region to the dirty cards region list.
        head = _dirty_cards_region_list;
        next = (HeapRegion*)
          Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
        if (next == head) {
          assert(hr->get_next_dirty_cards_region() == hr,
                 "hr->get_next_dirty_cards_region() != hr");
          if (next == NULL) {
            // The last region in the list points to itself.
            hr->set_next_dirty_cards_region(hr);
          } else {
            hr->set_next_dirty_cards_region(next);
          }
        }
      } while (next != head);
    }
  }
}
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
402 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
// Pops one region off the lock-free dirty cards region list, or
// returns NULL if the list is empty.  The last element points to
// itself (see push_dirty_cards_region), which distinguishes it from a
// region that is not on the list (NULL next pointer).
HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
{
  HeapRegion* head;
  HeapRegion* hr;
  do {
    head = _dirty_cards_region_list;
    if (head == NULL) {
      return NULL;
    }
    HeapRegion* new_head = head->get_next_dirty_cards_region();
    if (head == new_head) {
      // The last region.
      new_head = NULL;
    }
    // Retry until we successfully swing the list head past `head`.
    hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
                                          head);
  } while (hr != head);
  assert(hr != NULL, "invariant");
  // Mark the popped region as no longer on the list.
  hr->set_next_dirty_cards_region(NULL);
  return hr;
}
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
424 |
// Stops the concurrent GC threads: the concurrent refinement threads
// first, then the concurrent mark thread.
void G1CollectedHeap::stop_conc_gc_threads() {
  _cg1r->stop();
  _cmThread->stop();
}
429 | |
// Verification routine (run at a safepoint): checks that the dirty
// card queue logs and the card table agree.  It clears all logged
// cards, verifies the card table is then fully clean, re-dirties the
// logged cards, and checks the clear/redirty counts match.  Restores
// the normal refinement closure before returning.
void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there's no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  // Re-dirty every logged card and check we touched as many cards as
  // we cleared above.
  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  // Restore the normal refinement closure.
  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}
474 | |
475 // Private class members. | |
476 | |
477 G1CollectedHeap* G1CollectedHeap::_g1h; | |
478 | |
479 // Private methods. | |
480 | |
2152 | 481 HeapRegion* |
482 G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) { | |
483 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); | |
484 while (!_secondary_free_list.is_empty() || free_regions_coming()) { | |
485 if (!_secondary_free_list.is_empty()) { | |
486 if (G1ConcRegionFreeingVerbose) { | |
487 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
488 "secondary_free_list has "SIZE_FORMAT" entries", | |
489 _secondary_free_list.length()); | |
490 } | |
491 // It looks as if there are free regions available on the | |
492 // secondary_free_list. Let's move them to the free_list and try | |
493 // again to allocate from it. | |
494 append_secondary_free_list(); | |
495 | |
496 assert(!_free_list.is_empty(), "if the secondary_free_list was not " | |
497 "empty we should have moved at least one entry to the free_list"); | |
498 HeapRegion* res = _free_list.remove_head(); | |
499 if (G1ConcRegionFreeingVerbose) { | |
500 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
501 "allocated "HR_FORMAT" from secondary_free_list", | |
502 HR_FORMAT_PARAMS(res)); | |
503 } | |
504 return res; | |
505 } | |
506 | |
507 // Wait here until we get notifed either when (a) there are no | |
508 // more free regions coming or (b) some regions have been moved on | |
509 // the secondary_free_list. | |
510 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); | |
511 } | |
512 | |
513 if (G1ConcRegionFreeingVerbose) { | |
514 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
515 "could not allocate from secondary_free_list"); | |
516 } | |
517 return NULL; | |
518 } | |
519 | |
520 HeapRegion* G1CollectedHeap::new_region_work(size_t word_size, | |
521 bool do_expand) { | |
522 assert(!isHumongous(word_size) || | |
523 word_size <= (size_t) HeapRegion::GrainWords, | |
524 "the only time we use this to allocate a humongous region is " | |
525 "when we are allocating a single humongous region"); | |
526 | |
527 HeapRegion* res; | |
528 if (G1StressConcRegionFreeing) { | |
529 if (!_secondary_free_list.is_empty()) { | |
530 if (G1ConcRegionFreeingVerbose) { | |
531 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
532 "forced to look at the secondary_free_list"); | |
533 } | |
534 res = new_region_try_secondary_free_list(word_size); | |
535 if (res != NULL) { | |
536 return res; | |
537 } | |
538 } | |
539 } | |
540 res = _free_list.remove_head_or_null(); | |
541 if (res == NULL) { | |
542 if (G1ConcRegionFreeingVerbose) { | |
543 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " | |
544 "res == NULL, trying the secondary_free_list"); | |
545 } | |
546 res = new_region_try_secondary_free_list(word_size); | |
547 } | |
342 | 548 if (res == NULL && do_expand) { |
549 expand(word_size * HeapWordSize); | |
2152 | 550 res = _free_list.remove_head_or_null(); |
342 | 551 } |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
552 if (res != NULL) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
553 if (G1PrintHeapRegions) { |
2152 | 554 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], " |
555 "top "PTR_FORMAT, res->hrs_index(), | |
556 res->bottom(), res->end(), res->top()); | |
342 | 557 } |
558 } | |
559 return res; | |
560 } | |
561 | |
2152 | 562 HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose, |
563 size_t word_size) { | |
342 | 564 HeapRegion* alloc_region = NULL; |
565 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
2152 | 566 alloc_region = new_region_work(word_size, true /* do_expand */); |
342 | 567 if (purpose == GCAllocForSurvived && alloc_region != NULL) { |
545 | 568 alloc_region->set_survivor(); |
342 | 569 } |
570 ++_gc_alloc_region_counts[purpose]; | |
571 } else { | |
572 g1_policy()->note_alloc_region_limit_reached(purpose); | |
573 } | |
574 return alloc_region; | |
575 } | |
576 | |
2152 | 577 int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, |
578 size_t word_size) { | |
579 int first = -1; | |
580 if (num_regions == 1) { | |
581 // Only one region to allocate, no need to go through the slower | |
582 // path. The caller will attempt the expasion if this fails, so | |
583 // let's not try to expand here too. | |
584 HeapRegion* hr = new_region_work(word_size, false /* do_expand */); | |
585 if (hr != NULL) { | |
586 first = hr->hrs_index(); | |
587 } else { | |
588 first = -1; | |
589 } | |
590 } else { | |
591 // We can't allocate humongous regions while cleanupComplete() is | |
592 // running, since some of the regions we find to be empty might not | |
593 // yet be added to the free list and it is not straightforward to | |
594 // know which list they are on so that we can remove them. Note | |
595 // that we only need to do this if we need to allocate more than | |
596 // one region to satisfy the current humongous allocation | |
597 // request. If we are only allocating one region we use the common | |
598 // region allocation code (see above). | |
599 wait_while_free_regions_coming(); | |
600 append_secondary_free_list_if_not_empty(); | |
601 | |
602 if (free_regions() >= num_regions) { | |
603 first = _hrs->find_contiguous(num_regions); | |
604 if (first != -1) { | |
605 for (int i = first; i < first + (int) num_regions; ++i) { | |
606 HeapRegion* hr = _hrs->at(i); | |
607 assert(hr->is_empty(), "sanity"); | |
608 assert(is_on_free_list(hr), "sanity"); | |
609 hr->set_pending_removal(true); | |
610 } | |
611 _free_list.remove_all_pending(num_regions); | |
612 } | |
613 } | |
614 } | |
615 return first; | |
616 } | |
617 | |
342 | 618 // If could fit into free regions w/o expansion, try. |
619 // Otherwise, if can expand, do so. | |
620 // Otherwise, if using ex regions might help, try with ex given back. | |
1973 | 621 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { |
2152 | 622 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
623 | |
624 verify_region_sets_optional(); | |
342 | 625 |
626 size_t num_regions = | |
1973 | 627 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; |
342 | 628 size_t x_size = expansion_regions(); |
2152 | 629 size_t fs = _hrs->free_suffix(); |
630 int first = humongous_obj_allocate_find_first(num_regions, word_size); | |
631 if (first == -1) { | |
632 // The only thing we can do now is attempt expansion. | |
342 | 633 if (fs + x_size >= num_regions) { |
634 expand((num_regions - fs) * HeapRegion::GrainBytes); | |
2152 | 635 first = humongous_obj_allocate_find_first(num_regions, word_size); |
636 assert(first != -1, "this should have worked"); | |
637 } | |
638 } | |
639 | |
640 if (first != -1) { | |
641 // Index of last region in the series + 1. | |
642 int last = first + (int) num_regions; | |
643 | |
644 // We need to initialize the region(s) we just discovered. This is | |
645 // a bit tricky given that it can happen concurrently with | |
646 // refinement threads refining cards on these regions and | |
647 // potentially wanting to refine the BOT as they are scanning | |
648 // those cards (this can happen shortly after a cleanup; see CR | |
649 // 6991377). So we have to set up the region(s) carefully and in | |
650 // a specific order. | |
651 | |
652 // The word size sum of all the regions we will allocate. | |
653 size_t word_size_sum = num_regions * HeapRegion::GrainWords; | |
654 assert(word_size <= word_size_sum, "sanity"); | |
655 | |
656 // This will be the "starts humongous" region. | |
657 HeapRegion* first_hr = _hrs->at(first); | |
658 // The header of the new object will be placed at the bottom of | |
659 // the first region. | |
660 HeapWord* new_obj = first_hr->bottom(); | |
661 // This will be the new end of the first region in the series that | |
662 // should also match the end of the last region in the seriers. | |
663 HeapWord* new_end = new_obj + word_size_sum; | |
664 // This will be the new top of the first region that will reflect | |
665 // this allocation. | |
666 HeapWord* new_top = new_obj + word_size; | |
667 | |
668 // First, we need to zero the header of the space that we will be | |
669 // allocating. When we update top further down, some refinement | |
670 // threads might try to scan the region. By zeroing the header we | |
671 // ensure that any thread that will try to scan the region will | |
672 // come across the zero klass word and bail out. | |
673 // | |
674 // NOTE: It would not have been correct to have used | |
675 // CollectedHeap::fill_with_object() and make the space look like | |
676 // an int array. The thread that is doing the allocation will | |
677 // later update the object header to a potentially different array | |
678 // type and, for a very short period of time, the klass and length | |
679 // fields will be inconsistent. This could cause a refinement | |
680 // thread to calculate the object size incorrectly. | |
681 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); | |
682 | |
683 // We will set up the first region as "starts humongous". This | |
684 // will also update the BOT covering all the regions to reflect | |
685 // that there is a single object that starts at the bottom of the | |
686 // first region. | |
687 first_hr->set_startsHumongous(new_top, new_end); | |
688 | |
689 // Then, if there are any, we will set up the "continues | |
690 // humongous" regions. | |
691 HeapRegion* hr = NULL; | |
692 for (int i = first + 1; i < last; ++i) { | |
693 hr = _hrs->at(i); | |
694 hr->set_continuesHumongous(first_hr); | |
695 } | |
696 // If we have "continues humongous" regions (hr != NULL), then the | |
697 // end of the last one should match new_end. | |
698 assert(hr == NULL || hr->end() == new_end, "sanity"); | |
699 | |
700 // Up to this point no concurrent thread would have been able to | |
701 // do any scanning on any region in this series. All the top | |
702 // fields still point to bottom, so the intersection between | |
703 // [bottom,top] and [card_start,card_end] will be empty. Before we | |
704 // update the top fields, we'll do a storestore to make sure that | |
705 // no thread sees the update to top before the zeroing of the | |
706 // object header and the BOT initialization. | |
707 OrderAccess::storestore(); | |
708 | |
709 // Now that the BOT and the object header have been initialized, | |
710 // we can update top of the "starts humongous" region. | |
711 assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), | |
712 "new_top should be in this region"); | |
713 first_hr->set_top(new_top); | |
714 | |
715 // Now, we will update the top fields of the "continues humongous" | |
716 // regions. The reason we need to do this is that, otherwise, | |
717 // these regions would look empty and this will confuse parts of | |
718 // G1. For example, the code that looks for a consecutive number | |
719 // of empty regions will consider them empty and try to | |
720 // re-allocate them. We can extend is_empty() to also include | |
721 // !continuesHumongous(), but it is easier to just update the top | |
722 // fields here. The way we set top for all regions (i.e., top == | |
723 // end for all regions but the last one, top == new_top for the | |
724 // last one) is actually used when we will free up the humongous | |
725 // region in free_humongous_region(). | |
726 hr = NULL; | |
727 for (int i = first + 1; i < last; ++i) { | |
728 hr = _hrs->at(i); | |
729 if ((i + 1) == last) { | |
730 // last continues humongous region | |
731 assert(hr->bottom() < new_top && new_top <= hr->end(), | |
732 "new_top should fall on this region"); | |
733 hr->set_top(new_top); | |
734 } else { | |
735 // not last one | |
736 assert(new_top > hr->end(), "new_top should be above this region"); | |
737 hr->set_top(hr->end()); | |
342 | 738 } |
739 } | |
2152 | 740 // If we have continues humongous regions (hr != NULL), then the |
741 // end of the last one should match new_end and its top should | |
742 // match new_top. | |
743 assert(hr == NULL || | |
744 (hr->end() == new_end && hr->top() == new_top), "sanity"); | |
745 | |
746 assert(first_hr->used() == word_size * HeapWordSize, "invariant"); | |
747 _summary_bytes_used += first_hr->used(); | |
748 _humongous_set.add(first_hr); | |
749 | |
750 return new_obj; | |
751 } | |
752 | |
753 verify_region_sets_optional(); | |
754 return NULL; | |
342 | 755 } |
756 | |
1973 | 757 void |
758 G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) { | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
759 // Other threads might still be trying to allocate using CASes out |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
760 // of the region we are retiring, as they can do so without holding |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
761 // the Heap_lock. So we first have to make sure that noone else can |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
762 // allocate in it by doing a maximal allocation. Even if our CAS |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
763 // attempt fails a few times, we'll succeed sooner or later given |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
764 // that a failed CAS attempt mean that the region is getting closed |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
765 // to being full (someone else succeeded in allocating into it). |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
766 size_t free_word_size = cur_alloc_region->free() / HeapWordSize; |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
767 |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
768 // This is the minimum free chunk we can turn into a dummy |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
769 // object. If the free space falls below this, then noone can |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
770 // allocate in this region anyway (all allocation requests will be |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
771 // of a size larger than this) so we won't have to perform the dummy |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
772 // allocation. |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
773 size_t min_word_size_to_fill = CollectedHeap::min_fill_size(); |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
774 |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
775 while (free_word_size >= min_word_size_to_fill) { |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
776 HeapWord* dummy = |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
777 cur_alloc_region->par_allocate_no_bot_updates(free_word_size); |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
778 if (dummy != NULL) { |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
779 // If the allocation was successful we should fill in the space. |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
780 CollectedHeap::fill_with_object(dummy, free_word_size); |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
781 break; |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
782 } |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
783 |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
784 free_word_size = cur_alloc_region->free() / HeapWordSize; |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
785 // It's also possible that someone else beats us to the |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
786 // allocation and they fill up the region. In that case, we can |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
787 // just get out of the loop |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
788 } |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
789 assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill, |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
790 "sanity"); |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
791 |
1973 | 792 retire_cur_alloc_region_common(cur_alloc_region); |
793 assert(_cur_alloc_region == NULL, "post-condition"); | |
794 } | |
795 | |
796 // See the comment in the .hpp file about the locking protocol and | |
797 // assumptions of this method (and other related ones). | |
342 | 798 HeapWord* |
1973 | 799 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size, |
800 bool at_safepoint, | |
1991
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
801 bool do_dirtying, |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
802 bool can_expand) { |
2152 | 803 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
1973 | 804 assert(_cur_alloc_region == NULL, |
805 "replace_cur_alloc_region_and_allocate() should only be called " | |
806 "after retiring the previous current alloc region"); | |
807 assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, | |
808 "at_safepoint and is_at_safepoint() should be a tautology"); | |
1991
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
809 assert(!can_expand || g1_policy()->can_expand_young_list(), |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
810 "we should not call this method with can_expand == true if " |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
811 "we are not allowed to expand the young gen"); |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
812 |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
813 if (can_expand || !g1_policy()->is_young_list_full()) { |
2152 | 814 HeapRegion* new_cur_alloc_region = new_alloc_region(word_size); |
1973 | 815 if (new_cur_alloc_region != NULL) { |
816 assert(new_cur_alloc_region->is_empty(), | |
817 "the newly-allocated region should be empty, " | |
818 "as right now we only allocate new regions out of the free list"); | |
819 g1_policy()->update_region_num(true /* next_is_young */); | |
820 set_region_short_lived_locked(new_cur_alloc_region); | |
821 | |
822 assert(!new_cur_alloc_region->isHumongous(), | |
823 "Catch a regression of this bug."); | |
824 | |
825 // We need to ensure that the stores to _cur_alloc_region and, | |
826 // subsequently, to top do not float above the setting of the | |
827 // young type. | |
828 OrderAccess::storestore(); | |
829 | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
830 // Now, perform the allocation out of the region we just |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
831 // allocated. Note that noone else can access that region at |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
832 // this point (as _cur_alloc_region has not been updated yet), |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
833 // so we can just go ahead and do the allocation without any |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
834 // atomics (and we expect this allocation attempt to |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
835 // suceeded). Given that other threads can attempt an allocation |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
836 // with a CAS and without needing the Heap_lock, if we assigned |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
837 // the new region to _cur_alloc_region before first allocating |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
838 // into it other threads might have filled up the new region |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
839 // before we got a chance to do the allocation ourselves. In |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
840 // that case, we would have needed to retire the region, grab a |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
841 // new one, and go through all this again. Allocating out of the |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
842 // new region before assigning it to _cur_alloc_region avoids |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
843 // all this. |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
844 HeapWord* result = |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
845 new_cur_alloc_region->allocate_no_bot_updates(word_size); |
1973 | 846 assert(result != NULL, "we just allocate out of an empty region " |
847 "so allocation should have been successful"); | |
848 assert(is_in(result), "result should be in the heap"); | |
849 | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
850 // Now make sure that the store to _cur_alloc_region does not |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
851 // float above the store to top. |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
852 OrderAccess::storestore(); |
1973 | 853 _cur_alloc_region = new_cur_alloc_region; |
854 | |
855 if (!at_safepoint) { | |
856 Heap_lock->unlock(); | |
857 } | |
858 | |
859 // do the dirtying, if necessary, after we release the Heap_lock | |
860 if (do_dirtying) { | |
861 dirty_young_block(result, word_size); | |
862 } | |
863 return result; | |
864 } | |
865 } | |
866 | |
867 assert(_cur_alloc_region == NULL, "we failed to allocate a new current " | |
868 "alloc region, it should still be NULL"); | |
2152 | 869 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
1973 | 870 return NULL; |
871 } | |
872 | |
873 // See the comment in the .hpp file about the locking protocol and | |
874 // assumptions of this method (and other related ones). | |
875 HeapWord* | |
876 G1CollectedHeap::attempt_allocation_slow(size_t word_size) { | |
877 assert_heap_locked_and_not_at_safepoint(); | |
878 assert(!isHumongous(word_size), "attempt_allocation_slow() should not be " | |
879 "used for humongous allocations"); | |
880 | |
2152 | 881 // We should only reach here when we were unable to allocate |
882 // otherwise. So, we should have not active current alloc region. | |
883 assert(_cur_alloc_region == NULL, "current alloc region should be NULL"); | |
884 | |
1973 | 885 // We will loop while succeeded is false, which means that we tried |
886 // to do a collection, but the VM op did not succeed. So, when we | |
887 // exit the loop, either one of the allocation attempts was | |
888 // successful, or we succeeded in doing the VM op but which was | |
889 // unable to allocate after the collection. | |
890 for (int try_count = 1; /* we'll return or break */; try_count += 1) { | |
891 bool succeeded = true; | |
892 | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
893 // Every time we go round the loop we should be holding the Heap_lock. |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
894 assert_heap_locked(); |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
895 |
1973 | 896 if (GC_locker::is_active_and_needs_gc()) { |
1991
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
897 // We are locked out of GC because of the GC locker. We can |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
898 // allocate a new region only if we can expand the young gen. |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
899 |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
900 if (g1_policy()->can_expand_young_list()) { |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
901 // Yes, we are allowed to expand the young gen. Let's try to |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
902 // allocate a new current alloc region. |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
903 HeapWord* result = |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
904 replace_cur_alloc_region_and_allocate(word_size, |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
905 false, /* at_safepoint */ |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
906 true, /* do_dirtying */ |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
907 true /* can_expand */); |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
908 if (result != NULL) { |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
909 assert_heap_not_locked(); |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
910 return result; |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
911 } |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
912 } |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
913 // We could not expand the young gen further (or we could but we |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
914 // failed to allocate a new region). We'll stall until the GC |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
915 // locker forces a GC. |
1973 | 916 |
917 // If this thread is not in a jni critical section, we stall | |
918 // the requestor until the critical section has cleared and | |
919 // GC allowed. When the critical section clears, a GC is | |
920 // initiated by the last thread exiting the critical section; so | |
921 // we retry the allocation sequence from the beginning of the loop, | |
922 // rather than causing more, now probably unnecessary, GC attempts. | |
923 JavaThread* jthr = JavaThread::current(); | |
924 assert(jthr != NULL, "sanity"); | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
925 if (jthr->in_critical()) { |
1973 | 926 if (CheckJNICalls) { |
927 fatal("Possible deadlock due to allocating while" | |
928 " in jni critical section"); | |
929 } | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
930 // We are returning NULL so the protocol is that we're still |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
931 // holding the Heap_lock. |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
932 assert_heap_locked(); |
1973 | 933 return NULL; |
1666
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
934 } |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
935 |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
936 Heap_lock->unlock(); |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
937 GC_locker::stall_until_clear(); |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
938 |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
939 // No need to relock the Heap_lock. We'll fall off to the code |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
940 // below the else-statement which assumes that we are not |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
941 // holding the Heap_lock. |
1973 | 942 } else { |
943 // We are not locked out. So, let's try to do a GC. The VM op | |
944 // will retry the allocation before it completes. | |
945 | |
946 // Read the GC count while holding the Heap_lock | |
947 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); | |
948 | |
949 Heap_lock->unlock(); | |
950 | |
951 HeapWord* result = | |
952 do_collection_pause(word_size, gc_count_before, &succeeded); | |
953 assert_heap_not_locked(); | |
954 if (result != NULL) { | |
955 assert(succeeded, "the VM op should have succeeded"); | |
956 | |
957 // Allocations that take place on VM operations do not do any | |
958 // card dirtying and we have to do it here. | |
959 dirty_young_block(result, word_size); | |
960 return result; | |
961 } | |
962 } | |
963 | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
964 // Both paths that get us here from above unlock the Heap_lock. |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
965 assert_heap_not_locked(); |
1973 | 966 |
967 // We can reach here when we were unsuccessful in doing a GC, | |
968 // because another thread beat us to it, or because we were locked | |
969 // out of GC due to the GC locker. In either case a new alloc | |
970 // region might be available so we will retry the allocation. | |
971 HeapWord* result = attempt_allocation(word_size); | |
972 if (result != NULL) { | |
973 assert_heap_not_locked(); | |
974 return result; | |
975 } | |
976 | |
977 // So far our attempts to allocate failed. The only time we'll go | |
978 // around the loop and try again is if we tried to do a GC and the | |
979 // VM op that we tried to schedule was not successful because | |
980 // another thread beat us to it. If that happened it's possible | |
981 // that by the time we grabbed the Heap_lock again and tried to | |
982 // allocate other threads filled up the young generation, which | |
983 // means that the allocation attempt after the GC also failed. So, | |
984 // it's worth trying to schedule another GC pause. | |
985 if (succeeded) { | |
986 break; | |
987 } | |
988 | |
989 // Give a warning if we seem to be looping forever. | |
990 if ((QueuedAllocationWarningCount > 0) && | |
991 (try_count % QueuedAllocationWarningCount == 0)) { | |
992 warning("G1CollectedHeap::attempt_allocation_slow() " | |
993 "retries %d times", try_count); | |
342 | 994 } |
995 } | |
996 | |
1973 | 997 assert_heap_locked(); |
998 return NULL; | |
999 } | |
1000 | |
1001 // See the comment in the .hpp file about the locking protocol and | |
1002 // assumptions of this method (and other related ones). | |
1003 HeapWord* | |
1004 G1CollectedHeap::attempt_allocation_humongous(size_t word_size, | |
1005 bool at_safepoint) { | |
1006 // This is the method that will allocate a humongous object. All | |
1007 // allocation paths that attempt to allocate a humongous object | |
1008 // should eventually reach here. Currently, the only paths are from | |
1009 // mem_allocate() and attempt_allocation_at_safepoint(). | |
2152 | 1010 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
1973 | 1011 assert(isHumongous(word_size), "attempt_allocation_humongous() " |
1012 "should only be used for humongous allocations"); | |
1013 assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, | |
1014 "at_safepoint and is_at_safepoint() should be a tautology"); | |
1015 | |
1016 HeapWord* result = NULL; | |
1017 | |
1018 // We will loop while succeeded is false, which means that we tried | |
1019 // to do a collection, but the VM op did not succeed. So, when we | |
1020 // exit the loop, either one of the allocation attempts was | |
1021 // successful, or we succeeded in doing the VM op but which was | |
1022 // unable to allocate after the collection. | |
1023 for (int try_count = 1; /* we'll return or break */; try_count += 1) { | |
1024 bool succeeded = true; | |
1025 | |
1026 // Given that humongous objects are not allocated in young | |
1027 // regions, we'll first try to do the allocation without doing a | |
1028 // collection hoping that there's enough space in the heap. | |
1029 result = humongous_obj_allocate(word_size); | |
1030 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), | |
1031 "catch a regression of this bug."); | |
1032 if (result != NULL) { | |
1033 if (!at_safepoint) { | |
1034 // If we're not at a safepoint, unlock the Heap_lock. | |
1035 Heap_lock->unlock(); | |
1036 } | |
1037 return result; | |
1038 } | |
1039 | |
1040 // If we failed to allocate the humongous object, we should try to | |
1041 // do a collection pause (if we're allowed) in case it reclaims | |
1042 // enough space for the allocation to succeed after the pause. | |
1043 if (!at_safepoint) { | |
1044 // Read the GC count while holding the Heap_lock | |
1045 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); | |
1046 | |
1047 // If we're allowed to do a collection we're not at a | |
1048 // safepoint, so it is safe to unlock the Heap_lock. | |
342 | 1049 Heap_lock->unlock(); |
1973 | 1050 |
1051 result = do_collection_pause(word_size, gc_count_before, &succeeded); | |
1052 assert_heap_not_locked(); | |
1053 if (result != NULL) { | |
1054 assert(succeeded, "the VM op should have succeeded"); | |
1055 return result; | |
1056 } | |
1057 | |
1058 // If we get here, the VM operation either did not succeed | |
1059 // (i.e., another thread beat us to it) or it succeeded but | |
1060 // failed to allocate the object. | |
1061 | |
1062 // If we're allowed to do a collection we're not at a | |
1063 // safepoint, so it is safe to lock the Heap_lock. | |
1064 Heap_lock->lock(); | |
1065 } | |
1066 | |
1067 assert(result == NULL, "otherwise we should have exited the loop earlier"); | |
1068 | |
1069 // So far our attempts to allocate failed. The only time we'll go | |
1070 // around the loop and try again is if we tried to do a GC and the | |
1071 // VM op that we tried to schedule was not successful because | |
1072 // another thread beat us to it. That way it's possible that some | |
1073 // space was freed up by the thread that successfully scheduled a | |
1074 // GC. So it's worth trying to allocate again. | |
1075 if (succeeded) { | |
1076 break; | |
342 | 1077 } |
1078 | |
1973 | 1079 // Give a warning if we seem to be looping forever. |
1080 if ((QueuedAllocationWarningCount > 0) && | |
1081 (try_count % QueuedAllocationWarningCount == 0)) { | |
1082 warning("G1CollectedHeap::attempt_allocation_humongous " | |
1083 "retries %d times", try_count); | |
1084 } | |
1085 } | |
1086 | |
2152 | 1087 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
1973 | 1088 return NULL; |
1089 } | |
1090 | |
1091 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, | |
1092 bool expect_null_cur_alloc_region) { | |
2152 | 1093 assert_at_safepoint(true /* should_be_vm_thread */); |
1973 | 1094 assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region, |
1975
d9310331a29c
7003860: G1: assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region) fails
tonyp
parents:
1974
diff
changeset
|
1095 err_msg("the current alloc region was unexpectedly found " |
d9310331a29c
7003860: G1: assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region) fails
tonyp
parents:
1974
diff
changeset
|
1096 "to be non-NULL, cur alloc region: "PTR_FORMAT" " |
d9310331a29c
7003860: G1: assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region) fails
tonyp
parents:
1974
diff
changeset
|
1097 "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT, |
d9310331a29c
7003860: G1: assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region) fails
tonyp
parents:
1974
diff
changeset
|
1098 _cur_alloc_region, expect_null_cur_alloc_region, word_size)); |
1973 | 1099 |
1100 if (!isHumongous(word_size)) { | |
1101 if (!expect_null_cur_alloc_region) { | |
1102 HeapRegion* cur_alloc_region = _cur_alloc_region; | |
1103 if (cur_alloc_region != NULL) { | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1104 // We are at a safepoint so no reason to use the MT-safe version. |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1105 HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size); |
1973 | 1106 if (result != NULL) { |
1107 assert(is_in(result), "result should be in the heap"); | |
1108 | |
1109 // We will not do any dirtying here. This is guaranteed to be | |
1110 // called during a safepoint and the thread that scheduled the | |
1111 // pause will do the dirtying if we return a non-NULL result. | |
1112 return result; | |
1113 } | |
1114 | |
1115 retire_cur_alloc_region_common(cur_alloc_region); | |
1116 } | |
342 | 1117 } |
1973 | 1118 |
1119 assert(_cur_alloc_region == NULL, | |
1120 "at this point we should have no cur alloc region"); | |
1121 return replace_cur_alloc_region_and_allocate(word_size, | |
1122 true, /* at_safepoint */ | |
1991
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
1123 false /* do_dirtying */, |
016a3628c885
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
tonyp
parents:
1975
diff
changeset
|
1124 false /* can_expand */); |
1973 | 1125 } else { |
1126 return attempt_allocation_humongous(word_size, | |
1127 true /* at_safepoint */); | |
1128 } | |
1129 | |
1130 ShouldNotReachHere(); | |
1131 } | |
1132 | |
1133 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { | |
1134 assert_heap_not_locked_and_not_at_safepoint(); | |
1135 assert(!isHumongous(word_size), "we do not allow TLABs of humongous size"); | |
1136 | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1137 // First attempt: Try allocating out of the current alloc region |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1138 // using a CAS. If that fails, take the Heap_lock and retry the |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1139 // allocation, potentially replacing the current alloc region. |
1973 | 1140 HeapWord* result = attempt_allocation(word_size); |
1141 if (result != NULL) { | |
1142 assert_heap_not_locked(); | |
1143 return result; | |
1144 } | |
1145 | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1146 // Second attempt: Go to the slower path where we might try to |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1147 // schedule a collection. |
1973 | 1148 result = attempt_allocation_slow(word_size); |
1149 if (result != NULL) { | |
1150 assert_heap_not_locked(); | |
1151 return result; | |
1152 } | |
1153 | |
1154 assert_heap_locked(); | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1155 // Need to unlock the Heap_lock before returning. |
1973 | 1156 Heap_lock->unlock(); |
1157 return NULL; | |
342 | 1158 } |
1159 | |
1160 HeapWord* | |
1161 G1CollectedHeap::mem_allocate(size_t word_size, | |
1162 bool is_noref, | |
1163 bool is_tlab, | |
1973 | 1164 bool* gc_overhead_limit_was_exceeded) { |
1165 assert_heap_not_locked_and_not_at_safepoint(); | |
1166 assert(!is_tlab, "mem_allocate() this should not be called directly " | |
1167 "to allocate TLABs"); | |
342 | 1168 |
1169 // Loop until the allocation is satisified, | |
1170 // or unsatisfied after GC. | |
1973 | 1171 for (int try_count = 1; /* we'll return */; try_count += 1) { |
1172 unsigned int gc_count_before; | |
342 | 1173 { |
1973 | 1174 if (!isHumongous(word_size)) { |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1175 // First attempt: Try allocating out of the current alloc region |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1176 // using a CAS. If that fails, take the Heap_lock and retry the |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1177 // allocation, potentially replacing the current alloc region. |
1973 | 1178 HeapWord* result = attempt_allocation(word_size); |
1179 if (result != NULL) { | |
1180 assert_heap_not_locked(); | |
1181 return result; | |
1182 } | |
1183 | |
1184 assert_heap_locked(); | |
1185 | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1186 // Second attempt: Go to the slower path where we might try to |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1187 // schedule a collection. |
1973 | 1188 result = attempt_allocation_slow(word_size); |
1189 if (result != NULL) { | |
1190 assert_heap_not_locked(); | |
1191 return result; | |
1192 } | |
1193 } else { | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1194 // attempt_allocation_humongous() requires the Heap_lock to be held. |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1195 Heap_lock->lock(); |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1196 |
1973 | 1197 HeapWord* result = attempt_allocation_humongous(word_size, |
1198 false /* at_safepoint */); | |
1199 if (result != NULL) { | |
1200 assert_heap_not_locked(); | |
1201 return result; | |
1202 } | |
342 | 1203 } |
1973 | 1204 |
1205 assert_heap_locked(); | |
342 | 1206 // Read the gc count while the heap lock is held. |
1207 gc_count_before = SharedHeap::heap()->total_collections(); | |
2134
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1208 |
b158bed62ef5
6994297: G1: do first-level slow-path allocations with a CAS
tonyp
parents:
2133
diff
changeset
|
1209 // Release the Heap_lock before attempting the collection. |
342 | 1210 Heap_lock->unlock(); |
1211 } | |
1212 | |
1213 // Create the garbage collection operation... | |
1973 | 1214 VM_G1CollectForAllocation op(gc_count_before, word_size); |
342 | 1215 // ...and get the VM thread to execute it. |
1216 VMThread::execute(&op); | |
1973 | 1217 |
1218 assert_heap_not_locked(); | |
1219 if (op.prologue_succeeded() && op.pause_succeeded()) { | |
1220 // If the operation was successful we'll return the result even | |
1221 // if it is NULL. If the allocation attempt failed immediately | |
1222 // after a Full GC, it's unlikely we'll be able to allocate now. | |
1223 HeapWord* result = op.result(); | |
1224 if (result != NULL && !isHumongous(word_size)) { | |
1225 // Allocations that take place on VM operations do not do any | |
1226 // card dirtying and we have to do it here. We only have to do | |
1227 // this for non-humongous allocations, though. | |
1228 dirty_young_block(result, word_size); | |
1229 } | |
342 | 1230 return result; |
1973 | 1231 } else { |
1232 assert(op.result() == NULL, | |
1233 "the result should be NULL if the VM op did not succeed"); | |
342 | 1234 } |
1235 | |
1236 // Give a warning if we seem to be looping forever. | |
1237 if ((QueuedAllocationWarningCount > 0) && | |
1238 (try_count % QueuedAllocationWarningCount == 0)) { | |
1973 | 1239 warning("G1CollectedHeap::mem_allocate retries %d times", try_count); |
342 | 1240 } |
1241 } | |
1973 | 1242 |
1243 ShouldNotReachHere(); | |
342 | 1244 } |
1245 | |
1246 void G1CollectedHeap::abandon_cur_alloc_region() { | |
2152 | 1247 assert_at_safepoint(true /* should_be_vm_thread */); |
1248 | |
1249 HeapRegion* cur_alloc_region = _cur_alloc_region; | |
1250 if (cur_alloc_region != NULL) { | |
1251 assert(!cur_alloc_region->is_empty(), | |
1252 "the current alloc region can never be empty"); | |
1253 assert(cur_alloc_region->is_young(), | |
1254 "the current alloc region should be young"); | |
1255 | |
1256 retire_cur_alloc_region_common(cur_alloc_region); | |
1257 } | |
1258 assert(_cur_alloc_region == NULL, "post-condition"); | |
342 | 1259 } |
1260 | |
636 | 1261 void G1CollectedHeap::abandon_gc_alloc_regions() { |
1262 // first, make sure that the GC alloc region list is empty (it should!) | |
1263 assert(_gc_alloc_region_list == NULL, "invariant"); | |
1264 release_gc_alloc_regions(true /* totally */); | |
1265 } | |
1266 | |
342 | 1267 class PostMCRemSetClearClosure: public HeapRegionClosure { |
1268 ModRefBarrierSet* _mr_bs; | |
1269 public: | |
1270 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1271 bool doHeapRegion(HeapRegion* r) { | |
1272 r->reset_gc_time_stamp(); | |
1273 if (r->continuesHumongous()) | |
1274 return false; | |
1275 HeapRegionRemSet* hrrs = r->rem_set(); | |
1276 if (hrrs != NULL) hrrs->clear(); | |
1277 // You might think here that we could clear just the cards | |
1278 // corresponding to the used region. But no: if we leave a dirty card | |
1279 // in a region we might allocate into, then it would prevent that card | |
1280 // from being enqueued, and cause it to be missed. | |
1281 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
1282 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
1283 return false; | |
1284 } | |
1285 }; | |
1286 | |
1287 | |
1288 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
1289 ModRefBarrierSet* _mr_bs; | |
1290 public: | |
1291 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1292 bool doHeapRegion(HeapRegion* r) { | |
1293 if (r->continuesHumongous()) return false; | |
1294 if (r->used_region().word_size() != 0) { | |
1295 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
1296 } | |
1297 return false; | |
1298 } | |
1299 }; | |
1300 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1301 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1302 G1CollectedHeap* _g1h; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1303 UpdateRSOopClosure _cl; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1304 int _worker_i; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1305 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1306 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
1861 | 1307 _cl(g1->g1_rem_set(), worker_i), |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1308 _worker_i(worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1309 _g1h(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1310 { } |
1960
878b57474103
6978187: G1: assert(ParallelGCThreads> 1 || n_yielded() == _hrrs->occupied()) strikes again
johnc
parents:
1883
diff
changeset
|
1311 |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1312 bool doHeapRegion(HeapRegion* r) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1313 if (!r->continuesHumongous()) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1314 _cl.set_from(r); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1315 r->oop_iterate(&_cl); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1316 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1317 return false; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1318 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1319 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1320 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1321 class ParRebuildRSTask: public AbstractGangTask { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1322 G1CollectedHeap* _g1; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1323 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1324 ParRebuildRSTask(G1CollectedHeap* g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1325 : AbstractGangTask("ParRebuildRSTask"), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1326 _g1(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1327 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1328 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1329 void work(int i) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1330 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1331 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1332 HeapRegion::RebuildRSClaimValue); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1333 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1334 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1335 |
1973 | 1336 bool G1CollectedHeap::do_collection(bool explicit_gc, |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1337 bool clear_all_soft_refs, |
342 | 1338 size_t word_size) { |
2152 | 1339 assert_at_safepoint(true /* should_be_vm_thread */); |
1340 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1341 if (GC_locker::check_active_before_gc()) { |
1973 | 1342 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1343 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1344 |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2039
diff
changeset
|
1345 SvcGCMarker sgcm(SvcGCMarker::FULL); |
342 | 1346 ResourceMark rm; |
1347 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1348 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1349 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1350 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1351 |
2152 | 1352 verify_region_sets_optional(); |
342 | 1353 |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1354 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1355 collector_policy()->should_clear_all_soft_refs(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1356 |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1357 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1358 |
342 | 1359 { |
1360 IsGCActiveMark x; | |
1361 | |
1362 // Timing | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1363 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1364 assert(!system_gc || explicit_gc, "invariant"); |
342 | 1365 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
1366 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1367 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1368 PrintGC, true, gclog_or_tty); |
342 | 1369 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1370 TraceMemoryManagerStats tms(true /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1371 |
342 | 1372 double start = os::elapsedTime(); |
1373 g1_policy()->record_full_collection_start(); | |
1374 | |
2152 | 1375 wait_while_free_regions_coming(); |
1376 append_secondary_free_list_if_not_empty(); | |
1377 | |
342 | 1378 gc_prologue(true); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1379 increment_total_collections(true /* full gc */); |
342 | 1380 |
1381 size_t g1h_prev_used = used(); | |
1382 assert(used() == recalculate_used(), "Should be equal"); | |
1383 | |
1384 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
1385 HandleMark hm; // Discard invalid handles created during verification | |
1386 prepare_for_verify(); | |
1387 gclog_or_tty->print(" VerifyBeforeGC:"); | |
1388 Universe::verify(true); | |
1389 } | |
1390 | |
1391 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
1392 | |
1393 // We want to discover references, but not process them yet. | |
1394 // This mode is disabled in | |
1395 // instanceRefKlass::process_discovered_references if the | |
1396 // generation does some collection work, or | |
1397 // instanceRefKlass::enqueue_discovered_references if the | |
1398 // generation returns without doing any work. | |
1399 ref_processor()->disable_discovery(); | |
1400 ref_processor()->abandon_partial_discovery(); | |
1401 ref_processor()->verify_no_references_recorded(); | |
1402 | |
1403 // Abandon current iterations of concurrent marking and concurrent | |
1404 // refinement, if any are in progress. | |
1405 concurrent_mark()->abort(); | |
1406 | |
1407 // Make sure we'll choose a new allocation region afterwards. | |
1408 abandon_cur_alloc_region(); | |
636 | 1409 abandon_gc_alloc_regions(); |
342 | 1410 assert(_cur_alloc_region == NULL, "Invariant."); |
1861 | 1411 g1_rem_set()->cleanupHRRS(); |
342 | 1412 tear_down_region_lists(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1413 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1414 // We may have added regions to the current incremental collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1415 // set between the last GC or pause and now. We need to clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1416 // incremental collection set and then start rebuilding it afresh |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1417 // after this full GC. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1418 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1419 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1420 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1421 |
342 | 1422 if (g1_policy()->in_young_gc_mode()) { |
1423 empty_young_list(); | |
1424 g1_policy()->set_full_young_gcs(true); | |
1425 } | |
1426 | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1427 // See the comment in G1CollectedHeap::ref_processing_init() about |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1428 // how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
1429 |
342 | 1430 // Temporarily make reference _discovery_ single threaded (non-MT). |
1431 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); | |
1432 | |
1433 // Temporarily make refs discovery atomic | |
1434 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
1435 | |
1436 // Temporarily clear _is_alive_non_header | |
1437 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
1438 | |
1439 ref_processor()->enable_discovery(); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1440 ref_processor()->setup_policy(do_clear_all_soft_refs); |
342 | 1441 |
1442 // Do collection work | |
1443 { | |
1444 HandleMark hm; // Discard invalid handles created during gc | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1445 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); |
342 | 1446 } |
2152 | 1447 assert(free_regions() == 0, "we should not have added any free regions"); |
342 | 1448 rebuild_region_lists(); |
1449 | |
1450 _summary_bytes_used = recalculate_used(); | |
1451 | |
1452 ref_processor()->enqueue_discovered_references(); | |
1453 | |
1454 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
1455 | |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1456 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1457 |
342 | 1458 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
1459 HandleMark hm; // Discard invalid handles created during verification | |
1460 gclog_or_tty->print(" VerifyAfterGC:"); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
1461 prepare_for_verify(); |
342 | 1462 Universe::verify(false); |
1463 } | |
1464 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
1465 | |
1466 reset_gc_time_stamp(); | |
1467 // Since everything potentially moved, we will clear all remembered | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1468 // sets, and clear all cards. Later we will rebuild remebered |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1469 // sets. We will also reset the GC time stamps of the regions. |
342 | 1470 PostMCRemSetClearClosure rs_clear(mr_bs()); |
1471 heap_region_iterate(&rs_clear); | |
1472 | |
1473 // Resize the heap if necessary. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1474 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); |
342 | 1475 |
1476 if (_cg1r->use_cache()) { | |
1477 _cg1r->clear_and_record_card_counts(); | |
1478 _cg1r->clear_hot_cache(); | |
1479 } | |
1480 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1481 // Rebuild remembered sets of all regions. |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1482 |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1483 if (G1CollectedHeap::use_parallel_gc_threads()) { |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1484 ParRebuildRSTask rebuild_rs_task(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1485 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1486 HeapRegion::InitialClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1487 set_par_threads(workers()->total_workers()); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1488 workers()->run_task(&rebuild_rs_task); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1489 set_par_threads(0); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1490 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1491 HeapRegion::RebuildRSClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1492 reset_heap_region_claim_values(); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1493 } else { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1494 RebuildRSOutOfRegionClosure rebuild_rs(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1495 heap_region_iterate(&rebuild_rs); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1496 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1497 |
342 | 1498 if (PrintGC) { |
1499 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
1500 } | |
1501 | |
1502 if (true) { // FIXME | |
1503 // Ask the permanent generation to adjust size for full collections | |
1504 perm()->compute_new_size(); | |
1505 } | |
1506 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1507 // Start a new incremental collection set for the next pause |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1508 assert(g1_policy()->collection_set() == NULL, "must be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1509 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1510 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1511 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1512 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1513 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1514 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1515 |
342 | 1516 double end = os::elapsedTime(); |
1517 g1_policy()->record_full_collection_end(); | |
1518 | |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1519 #ifdef TRACESPINNING |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1520 ParallelTaskTerminator::print_termination_counts(); |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1521 #endif |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1522 |
342 | 1523 gc_epilogue(true); |
1524 | |
794 | 1525 // Discard all rset updates |
1526 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1527 assert(!G1DeferredRSUpdate |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1528 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
342 | 1529 } |
1530 | |
1531 if (g1_policy()->in_young_gc_mode()) { | |
1532 _young_list->reset_sampled_info(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1533 // At this point there should be no regions in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1534 // entire heap tagged as young. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1535 assert( check_young_list_empty(true /* check_heap */), |
342 | 1536 "young list should be empty at this point"); |
1537 } | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1538 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1539 // Update the number of full collections that have been completed. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
1540 increment_full_collections_completed(false /* concurrent */); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1541 |
2152 | 1542 verify_region_sets_optional(); |
1543 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1544 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1545 Universe::print_heap_after_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1546 } |
1973 | 1547 |
1548 return true; | |
342 | 1549 } |
1550 | |
1551 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1973 | 1552 // do_collection() will return whether it succeeded in performing |
1553 // the GC. Currently, there is no facility on the | |
1554 // do_full_collection() API to notify the caller than the collection | |
1555 // did not succeed (e.g., because it was locked out by the GC | |
1556 // locker). So, right now, we'll ignore the return value. | |
1557 bool dummy = do_collection(true, /* explicit_gc */ | |
1558 clear_all_soft_refs, | |
1559 0 /* word_size */); | |
342 | 1560 } |
1561 | |
1562 // This code is mostly copied from TenuredGeneration. | |
1563 void | |
1564 G1CollectedHeap:: | |
1565 resize_if_necessary_after_full_collection(size_t word_size) { | |
1566 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1567 | |
1568 // Include the current allocation, if any, and bytes that will be | |
1569 // pre-allocated to support collections, as "used". | |
1570 const size_t used_after_gc = used(); | |
1571 const size_t capacity_after_gc = capacity(); | |
1572 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1573 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1574 // This is enforced in arguments.cpp. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1575 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1576 "otherwise the code below doesn't make sense"); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1577 |
342 | 1578 // We don't have floating point command-line arguments |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1579 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; |
342 | 1580 const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1581 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; |
342 | 1582 const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
1583 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1584 const size_t min_heap_size = collector_policy()->min_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1585 const size_t max_heap_size = collector_policy()->max_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1586 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1587 // We have to be careful here as these two calculations can overflow |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1588 // 32-bit size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1589 double used_after_gc_d = (double) used_after_gc; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1590 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1591 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1592 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1593 // Let's make sure that they are both under the max heap size, which |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1594 // by default will make them fit into a size_t. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1595 double desired_capacity_upper_bound = (double) max_heap_size; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1596 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1597 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1598 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1599 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1600 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1601 // We can now safely turn them into size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1602 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1603 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1604 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1605 // This assert only makes sense here, before we adjust them |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1606 // with respect to the min and max heap size. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1607 assert(minimum_desired_capacity <= maximum_desired_capacity, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1608 err_msg("minimum_desired_capacity = "SIZE_FORMAT", " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1609 "maximum_desired_capacity = "SIZE_FORMAT, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1610 minimum_desired_capacity, maximum_desired_capacity)); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1611 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1612 // Should not be greater than the heap max size. No need to adjust |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1613 // it with respect to the heap min size as it's a lower bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1614 // we'll try to make the capacity larger than it, not smaller). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1615 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1616 // Should not be less than the heap min size. No need to adjust it |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1617 // with respect to the heap max size as it's an upper bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1618 // we'll try to make the capacity smaller than it, not greater). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1619 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); |
342 | 1620 |
1621 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1622 const double free_percentage = |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1623 (double) free_after_gc / (double) capacity_after_gc; |
342 | 1624 gclog_or_tty->print_cr("Computing new size after full GC "); |
1625 gclog_or_tty->print_cr(" " | |
1626 " minimum_free_percentage: %6.2f", | |
1627 minimum_free_percentage); | |
1628 gclog_or_tty->print_cr(" " | |
1629 " maximum_free_percentage: %6.2f", | |
1630 maximum_free_percentage); | |
1631 gclog_or_tty->print_cr(" " | |
1632 " capacity: %6.1fK" | |
1633 " minimum_desired_capacity: %6.1fK" | |
1634 " maximum_desired_capacity: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1635 (double) capacity_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1636 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1637 (double) maximum_desired_capacity / (double) K); |
342 | 1638 gclog_or_tty->print_cr(" " |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1639 " free_after_gc: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1640 " used_after_gc: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1641 (double) free_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1642 (double) used_after_gc / (double) K); |
342 | 1643 gclog_or_tty->print_cr(" " |
1644 " free_percentage: %6.2f", | |
1645 free_percentage); | |
1646 } | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1647 if (capacity_after_gc < minimum_desired_capacity) { |
342 | 1648 // Don't expand unless it's significant |
1649 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
1650 expand(expand_bytes); | |
1651 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1652 gclog_or_tty->print_cr(" " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1653 " expanding:" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1654 " max_heap_size: %6.1fK" |
342 | 1655 " minimum_desired_capacity: %6.1fK" |
1656 " expand_bytes: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1657 (double) max_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1658 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1659 (double) expand_bytes / (double) K); |
342 | 1660 } |
1661 | |
1662 // No expansion, now see if we want to shrink | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1663 } else if (capacity_after_gc > maximum_desired_capacity) { |
342 | 1664 // Capacity too large, compute shrinking size |
1665 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1666 shrink(shrink_bytes); | |
1667 if (PrintGC && Verbose) { | |
1668 gclog_or_tty->print_cr(" " | |
1669 " shrinking:" | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1670 " min_heap_size: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1671 " maximum_desired_capacity: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1672 " shrink_bytes: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1673 (double) min_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1674 (double) maximum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1675 (double) shrink_bytes / (double) K); |
342 | 1676 } |
1677 } | |
1678 } | |
1679 | |
1680 | |
1681 HeapWord* | |
1973 | 1682 G1CollectedHeap::satisfy_failed_allocation(size_t word_size, |
1683 bool* succeeded) { | |
2152 | 1684 assert_at_safepoint(true /* should_be_vm_thread */); |
1973 | 1685 |
1686 *succeeded = true; | |
1687 // Let's attempt the allocation first. | |
1688 HeapWord* result = attempt_allocation_at_safepoint(word_size, | |
1689 false /* expect_null_cur_alloc_region */); | |
1690 if (result != NULL) { | |
1691 assert(*succeeded, "sanity"); | |
1692 return result; | |
1693 } | |
342 | 1694 |
1695 // In a G1 heap, we're supposed to keep allocation from failing by | |
1696 // incremental pauses. Therefore, at least for now, we'll favor | |
1697 // expansion over collection. (This might change in the future if we can | |
1698 // do something smarter than full collection to satisfy a failed alloc.) | |
1699 result = expand_and_allocate(word_size); | |
1700 if (result != NULL) { | |
1973 | 1701 assert(*succeeded, "sanity"); |
342 | 1702 return result; |
1703 } | |
1704 | |
1973 | 1705 // Expansion didn't work, we'll try to do a Full GC. |
1706 bool gc_succeeded = do_collection(false, /* explicit_gc */ | |
1707 false, /* clear_all_soft_refs */ | |
1708 word_size); | |
1709 if (!gc_succeeded) { | |
1710 *succeeded = false; | |
1711 return NULL; | |
1712 } | |
1713 | |
1714 // Retry the allocation | |
1715 result = attempt_allocation_at_safepoint(word_size, | |
1716 true /* expect_null_cur_alloc_region */); | |
342 | 1717 if (result != NULL) { |
1973 | 1718 assert(*succeeded, "sanity"); |
342 | 1719 return result; |
1720 } | |
1721 | |
1973 | 1722 // Then, try a Full GC that will collect all soft references. |
1723 gc_succeeded = do_collection(false, /* explicit_gc */ | |
1724 true, /* clear_all_soft_refs */ | |
1725 word_size); | |
1726 if (!gc_succeeded) { | |
1727 *succeeded = false; | |
1728 return NULL; | |
1729 } | |
1730 | |
1731 // Retry the allocation once more | |
1732 result = attempt_allocation_at_safepoint(word_size, | |
1733 true /* expect_null_cur_alloc_region */); | |
342 | 1734 if (result != NULL) { |
1973 | 1735 assert(*succeeded, "sanity"); |
342 | 1736 return result; |
1737 } | |
1738 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1739 assert(!collector_policy()->should_clear_all_soft_refs(), |
1973 | 1740 "Flag should have been handled and cleared prior to this point"); |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1741 |
342 | 1742 // What else? We might try synchronous finalization later. If the total |
1743 // space available is large enough for the allocation, then a more | |
1744 // complete compaction phase than we've tried so far might be | |
1745 // appropriate. | |
1973 | 1746 assert(*succeeded, "sanity"); |
342 | 1747 return NULL; |
1748 } | |
1749 | |
1750 // Attempting to expand the heap sufficiently | |
1751 // to support an allocation of the given "word_size". If | |
1752 // successful, perform the allocation and return the address of the | |
1753 // allocated block, or else "NULL". | |
1754 | |
1755 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
2152 | 1756 assert_at_safepoint(true /* should_be_vm_thread */); |
1757 | |
1758 verify_region_sets_optional(); | |
1973 | 1759 |
342 | 1760 size_t expand_bytes = word_size * HeapWordSize; |
1761 if (expand_bytes < MinHeapDeltaBytes) { | |
1762 expand_bytes = MinHeapDeltaBytes; | |
1763 } | |
1764 expand(expand_bytes); | |
2152 | 1765 |
1766 verify_region_sets_optional(); | |
1973 | 1767 |
1768 return attempt_allocation_at_safepoint(word_size, | |
1975
d9310331a29c
7003860: G1: assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region) fails
tonyp
parents:
1974
diff
changeset
|
1769 false /* expect_null_cur_alloc_region */); |
342 | 1770 } |
1771 | |
1772 // FIXME: both this and shrink could probably be more efficient by | |
1773 // doing one "VirtualSpace::expand_by" call rather than several. | |
1774 void G1CollectedHeap::expand(size_t expand_bytes) { | |
1775 size_t old_mem_size = _g1_storage.committed_size(); | |
1776 // We expand by a minimum of 1K. | |
1777 expand_bytes = MAX2(expand_bytes, (size_t)K); | |
1778 size_t aligned_expand_bytes = | |
1779 ReservedSpace::page_align_size_up(expand_bytes); | |
1780 aligned_expand_bytes = align_size_up(aligned_expand_bytes, | |
1781 HeapRegion::GrainBytes); | |
1782 expand_bytes = aligned_expand_bytes; | |
1783 while (expand_bytes > 0) { | |
1784 HeapWord* base = (HeapWord*)_g1_storage.high(); | |
1785 // Commit more storage. | |
1786 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); | |
1787 if (!successful) { | |
1788 expand_bytes = 0; | |
1789 } else { | |
1790 expand_bytes -= HeapRegion::GrainBytes; | |
1791 // Expand the committed region. | |
1792 HeapWord* high = (HeapWord*) _g1_storage.high(); | |
1793 _g1_committed.set_end(high); | |
1794 // Create a new HeapRegion. | |
1795 MemRegion mr(base, high); | |
1796 bool is_zeroed = !_g1_max_committed.contains(base); | |
1797 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); | |
1798 | |
1799 // Now update max_committed if necessary. | |
1800 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); | |
1801 | |
1802 // Add it to the HeapRegionSeq. | |
1803 _hrs->insert(hr); | |
2152 | 1804 _free_list.add_as_tail(hr); |
342 | 1805 // And we used up an expansion region to create it. |
1806 _expansion_regions--; | |
1807 // Tell the cardtable about it. | |
1808 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1809 // And the offset table as well. | |
1810 _bot_shared->resize(_g1_committed.word_size()); | |
1811 } | |
1812 } | |
2152 | 1813 |
342 | 1814 if (Verbose && PrintGC) { |
1815 size_t new_mem_size = _g1_storage.committed_size(); | |
1816 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", | |
1817 old_mem_size/K, aligned_expand_bytes/K, | |
1818 new_mem_size/K); | |
1819 } | |
1820 } | |
1821 | |
1822 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) | |
1823 { | |
1824 size_t old_mem_size = _g1_storage.committed_size(); | |
1825 size_t aligned_shrink_bytes = | |
1826 ReservedSpace::page_align_size_down(shrink_bytes); | |
1827 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1828 HeapRegion::GrainBytes); | |
1829 size_t num_regions_deleted = 0; | |
1830 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); | |
1831 | |
1832 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1833 if (mr.byte_size() > 0) | |
1834 _g1_storage.shrink_by(mr.byte_size()); | |
1835 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1836 | |
1837 _g1_committed.set_end(mr.start()); | |
1838 _expansion_regions += num_regions_deleted; | |
1839 | |
1840 // Tell the cardtable about it. | |
1841 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1842 | |
1843 // And the offset table as well. | |
1844 _bot_shared->resize(_g1_committed.word_size()); | |
1845 | |
1846 HeapRegionRemSet::shrink_heap(n_regions()); | |
1847 | |
1848 if (Verbose && PrintGC) { | |
1849 size_t new_mem_size = _g1_storage.committed_size(); | |
1850 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1851 old_mem_size/K, aligned_shrink_bytes/K, | |
1852 new_mem_size/K); | |
1853 } | |
1854 } | |
1855 | |
1856 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
2152 | 1857 verify_region_sets_optional(); |
1858 | |
636 | 1859 release_gc_alloc_regions(true /* totally */); |
2152 | 1860 // Instead of tearing down / rebuilding the free lists here, we |
1861 // could instead use the remove_all_pending() method on free_list to | |
1862 // remove only the ones that we need to remove. | |
342 | 1863 tear_down_region_lists(); // We will rebuild them in a moment. |
1864 shrink_helper(shrink_bytes); | |
1865 rebuild_region_lists(); | |
2152 | 1866 |
1867 verify_region_sets_optional(); | |
342 | 1868 } |
1869 | |
1870 // Public methods. | |
1871 | |
1872 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1873 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1874 #endif // _MSC_VER | |
1875 | |
1876 | |
1877 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1878 SharedHeap(policy_), | |
1879 _g1_policy(policy_), | |
1111 | 1880 _dirty_card_queue_set(false), |
1705 | 1881 _into_cset_dirty_card_queue_set(false), |
2037
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
2030
diff
changeset
|
1882 _is_alive_closure(this), |
342 | 1883 _ref_processor(NULL), |
1884 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1885 _bot_shared(NULL), | |
1886 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1887 _evac_failure_scan_stack(NULL) , | |
1888 _mark_in_progress(false), | |
2152 | 1889 _cg1r(NULL), _summary_bytes_used(0), |
342 | 1890 _cur_alloc_region(NULL), |
1891 _refine_cte_cl(NULL), | |
1892 _full_collection(false), | |
2152 | 1893 _free_list("Master Free List"), |
1894 _secondary_free_list("Secondary Free List"), | |
1895 _humongous_set("Master Humongous Set"), | |
1896 _free_regions_coming(false), | |
342 | 1897 _young_list(new YoungList(this)), |
1898 _gc_time_stamp(0), | |
526 | 1899 _surviving_young_words(NULL), |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1900 _full_collections_completed(0), |
526 | 1901 _in_cset_fast_test(NULL), |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1902 _in_cset_fast_test_base(NULL), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
1903 _dirty_cards_region_list(NULL) { |
342 | 1904 _g1h = this; // To catch bugs. |
1905 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1906 vm_exit_during_initialization("Failed necessary allocation."); | |
1907 } | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1908 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1909 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1910 |
342 | 1911 int n_queues = MAX2((int)ParallelGCThreads, 1); |
1912 _task_queues = new RefToScanQueueSet(n_queues); | |
1913 | |
1914 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1915 assert(n_rem_sets > 0, "Invariant."); | |
1916 | |
1917 HeapRegionRemSetIterator** iter_arr = | |
1918 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1919 for (int i = 0; i < n_queues; i++) { | |
1920 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1921 } | |
1922 _rem_set_iterator = iter_arr; | |
1923 | |
1924 for (int i = 0; i < n_queues; i++) { | |
1925 RefToScanQueue* q = new RefToScanQueue(); | |
1926 q->initialize(); | |
1927 _task_queues->register_queue(i, q); | |
1928 } | |
1929 | |
1930 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
636 | 1931 _gc_alloc_regions[ap] = NULL; |
1932 _gc_alloc_region_counts[ap] = 0; | |
1933 _retained_gc_alloc_regions[ap] = NULL; | |
1934 // by default, we do not retain a GC alloc region for each ap; | |
1935 // we'll override this, when appropriate, below | |
1936 _retain_gc_alloc_region[ap] = false; | |
1937 } | |
1938 | |
1939 // We will try to remember the last half-full tenured region we | |
1940 // allocated to at the end of a collection so that we can re-use it | |
1941 // during the next collection. | |
1942 _retain_gc_alloc_region[GCAllocForTenured] = true; | |
1943 | |
342 | 1944 guarantee(_task_queues != NULL, "task_queues allocation failure."); |
1945 } | |
1946 | |
1947 jint G1CollectedHeap::initialize() { | |
1166 | 1948 CollectedHeap::pre_initialize(); |
342 | 1949 os::enable_vtime(); |
1950 | |
1951 // Necessary to satisfy locking discipline assertions. | |
1952 | |
1953 MutexLocker x(Heap_lock); | |
1954 | |
1955 // While there are no constraints in the GC code that HeapWordSize | |
1956 // be any particular value, there are multiple other areas in the | |
1957 // system which believe this to be true (e.g. oop->object_size in some | |
1958 // cases incorrectly returns the size in wordSize units rather than | |
1959 // HeapWordSize). | |
1960 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1961 | |
1962 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1963 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1964 | |
1965 // Ensure that the sizes are properly aligned. | |
1966 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1967 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1968 | |
1969 _cg1r = new ConcurrentG1Refine(); | |
1970 | |
1971 // Reserve the maximum. | |
1972 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1973 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1974 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1975 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1976 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1977 |
342 | 1978 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1979 HeapRegion::GrainBytes, | |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1980 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1981 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1982 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1983 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1984 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1985 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1986 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1987 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1988 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1989 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1990 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1991 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1992 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1993 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1994 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
2135
2e0b0c4671e4
6941122: G1: UseLargePages does not work with G1 garbage collector
brutisso
parents:
2134
diff
changeset
|
1995 UseLargePages, addr); |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1996 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1997 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1998 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1999 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
2000 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
2001 } |
342 | 2002 |
2003 if (!heap_rs.is_reserved()) { | |
2004 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
2005 return JNI_ENOMEM; | |
2006 } | |
2007 | |
2008 // It is important to do this in a way such that concurrent readers can't | |
2009 // temporarily think somethings in the heap. (I've actually seen this | |
2010 // happen in asserts: DLD.) | |
2011 _reserved.set_word_size(0); | |
2012 _reserved.set_start((HeapWord*)heap_rs.base()); | |
2013 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
2014 | |
2015 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
2016 | |
2017 // Create the gen rem set (and barrier set) for the entire reserved region. | |
2018 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
2019 set_barrier_set(rem_set()->bs()); | |
2020 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
2021 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
2022 } else { | |
2023 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
2024 return JNI_ENOMEM; | |
2025 } | |
2026 | |
2027 // Also create a G1 rem set. | |
1861 | 2028 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
2029 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
342 | 2030 } else { |
1861 | 2031 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
2032 return JNI_ENOMEM; | |
342 | 2033 } |
2034 | |
2035 // Carve out the G1 part of the heap. | |
2036 | |
2037 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
2038 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
2039 g1_rs.size()/HeapWordSize); | |
2040 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
2041 | |
2042 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
2043 | |
2044 _g1_storage.initialize(g1_rs, 0); | |
2045 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
2046 _g1_max_committed = _g1_committed; | |
393 | 2047 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 2048 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
2049 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
2050 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
2051 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
2052 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
2053 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
2054 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
2055 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
2056 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
2057 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
2058 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
2059 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
2060 |
2152 | 2061 HeapRegionSet::set_unrealistically_long_length(max_regions() + 1); |
2062 | |
342 | 2063 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
2064 heap_word_size(init_byte_size)); | |
2065 | |
2066 _g1h = this; | |
2067 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2068 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2069 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2070 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2071 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2072 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2073 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2074 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2075 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2076 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2077 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2078 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2079 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2080 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
2081 |
342 | 2082 // Create the ConcurrentMark data structure and thread. |
2083 // (Must do this late, so that "max_regions" is defined.) | |
2084 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
2085 _cmThread = _cm->cmThread(); | |
2086 | |
2087 // Initialize the from_card cache structure of HeapRegionRemSet. | |
2088 HeapRegionRemSet::init_heap(max_regions()); | |
2089 | |
677 | 2090 // Now expand into the initial heap size. |
2091 expand(init_byte_size); | |
342 | 2092 |
2093 // Perform any initialization actions delegated to the policy. | |
2094 g1_policy()->init(); | |
2095 | |
2096 g1_policy()->note_start_of_mark_thread(); | |
2097 | |
2098 _refine_cte_cl = | |
2099 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
2100 g1_rem_set(), | |
2101 concurrent_g1_refine()); | |
2102 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
2103 | |
2104 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
2105 SATB_Q_FL_lock, | |
1111 | 2106 G1SATBProcessCompletedThreshold, |
342 | 2107 Shared_SATB_Q_lock); |
794 | 2108 |
2109 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
2110 DirtyCardQ_FL_lock, | |
1111 | 2111 concurrent_g1_refine()->yellow_zone(), |
2112 concurrent_g1_refine()->red_zone(), | |
794 | 2113 Shared_DirtyCardQ_lock); |
2114 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2115 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2116 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2117 DirtyCardQ_FL_lock, |
1111 | 2118 -1, // never trigger processing |
2119 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2120 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2121 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
2122 } |
1705 | 2123 |
2124 // Initialize the card queue set used to hold cards containing | |
2125 // references into the collection set. | |
2126 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, | |
2127 DirtyCardQ_FL_lock, | |
2128 -1, // never trigger processing | |
2129 -1, // no limit on length | |
2130 Shared_DirtyCardQ_lock, | |
2131 &JavaThread::dirty_card_queue_set()); | |
2132 | |
342 | 2133 // In case we're keeping closure specialization stats, initialize those |
2134 // counts and that mechanism. | |
2135 SpecializationStats::clear(); | |
2136 | |
2137 _gc_alloc_region_list = NULL; | |
2138 | |
2139 // Do later initialization work for concurrent refinement. | |
2140 _cg1r->init(); | |
2141 | |
2142 return JNI_OK; | |
2143 } | |
2144 | |
2145 void G1CollectedHeap::ref_processing_init() { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2146 // Reference processing in G1 currently works as follows: |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2147 // |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2148 // * There is only one reference processor instance that |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2149 // 'spans' the entire heap. It is created by the code |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2150 // below. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2151 // * Reference discovery is not enabled during an incremental |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2152 // pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2153 // * Discoverered refs are not enqueued nor are they processed |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2154 // during an incremental pause (see 6484982). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2155 // * Reference discovery is enabled at initial marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2156 // * Reference discovery is disabled and the discovered |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2157 // references processed etc during remarking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2158 // * Reference discovery is MT (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2159 // * Reference discovery requires a barrier (see below). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2160 // * Reference processing is currently not MT (see 6608385). |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2161 // * A full GC enables (non-MT) reference discovery and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2162 // processes any discovered references. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
2163 |
342 | 2164 SharedHeap::ref_processing_init(); |
2165 MemRegion mr = reserved_region(); | |
2166 _ref_processor = ReferenceProcessor::create_ref_processor( | |
2167 mr, // span | |
2168 false, // Reference discovery is not atomic | |
2169 true, // mt_discovery | |
2037
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
2030
diff
changeset
|
2170 &_is_alive_closure, // is alive closure |
b03260081e9b
7006113: G1: Initialize ReferenceProcessor::_is_alive_non_header field
johnc
parents:
2030
diff
changeset
|
2171 // for efficiency |
342 | 2172 ParallelGCThreads, |
2173 ParallelRefProcEnabled, | |
2174 true); // Setting next fields of discovered | |
2175 // lists requires a barrier. | |
2176 } | |
2177 | |
2178 size_t G1CollectedHeap::capacity() const { | |
2179 return _g1_committed.byte_size(); | |
2180 } | |
2181 | |
1705 | 2182 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, |
2183 DirtyCardQueue* into_cset_dcq, | |
2184 bool concurrent, | |
342 | 2185 int worker_i) { |
889 | 2186 // Clean cards in the hot card cache |
1705 | 2187 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); |
889 | 2188 |
342 | 2189 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
2190 int n_completed_buffers = 0; | |
1705 | 2191 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { |
342 | 2192 n_completed_buffers++; |
2193 } | |
2194 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
2195 (double) n_completed_buffers); | |
2196 dcqs.clear_n_completed_buffers(); | |
2197 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
2198 } | |
2199 | |
2200 | |
2201 // Computes the sum of the storage used by the various regions. | |
2202 | |
2203 size_t G1CollectedHeap::used() const { | |
862
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2204 assert(Heap_lock->owner() != NULL, |
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2205 "Should be owned on this thread's behalf."); |
342 | 2206 size_t result = _summary_bytes_used; |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2207 // Read only once in case it is set to NULL concurrently |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2208 HeapRegion* hr = _cur_alloc_region; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2209 if (hr != NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2210 result += hr->used(); |
342 | 2211 return result; |
2212 } | |
2213 | |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2214 size_t G1CollectedHeap::used_unlocked() const { |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2215 size_t result = _summary_bytes_used; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2216 return result; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2217 } |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2218 |
342 | 2219 class SumUsedClosure: public HeapRegionClosure { |
2220 size_t _used; | |
2221 public: | |
2222 SumUsedClosure() : _used(0) {} | |
2223 bool doHeapRegion(HeapRegion* r) { | |
2224 if (!r->continuesHumongous()) { | |
2225 _used += r->used(); | |
2226 } | |
2227 return false; | |
2228 } | |
2229 size_t result() { return _used; } | |
2230 }; | |
2231 | |
2232 size_t G1CollectedHeap::recalculate_used() const { | |
2233 SumUsedClosure blk; | |
2234 _hrs->iterate(&blk); | |
2235 return blk.result(); | |
2236 } | |
2237 | |
2238 #ifndef PRODUCT | |
2239 class SumUsedRegionsClosure: public HeapRegionClosure { | |
2240 size_t _num; | |
2241 public: | |
677 | 2242 SumUsedRegionsClosure() : _num(0) {} |
342 | 2243 bool doHeapRegion(HeapRegion* r) { |
2244 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
2245 _num += 1; | |
2246 } | |
2247 return false; | |
2248 } | |
2249 size_t result() { return _num; } | |
2250 }; | |
2251 | |
2252 size_t G1CollectedHeap::recalculate_used_regions() const { | |
2253 SumUsedRegionsClosure blk; | |
2254 _hrs->iterate(&blk); | |
2255 return blk.result(); | |
2256 } | |
2257 #endif // PRODUCT | |
2258 | |
2259 size_t G1CollectedHeap::unsafe_max_alloc() { | |
2152 | 2260 if (free_regions() > 0) return HeapRegion::GrainBytes; |
342 | 2261 // otherwise, is there space in the current allocation region? |
2262 | |
2263 // We need to store the current allocation region in a local variable | |
2264 // here. The problem is that this method doesn't take any locks and | |
2265 // there may be other threads which overwrite the current allocation | |
2266 // region field. attempt_allocation(), for example, sets it to NULL | |
2267 // and this can happen *after* the NULL check here but before the call | |
2268 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
2269 // to be a problem in the optimized build, since the two loads of the | |
2270 // current allocation region field are optimized away. | |
2271 HeapRegion* car = _cur_alloc_region; | |
2272 | |
2273 // FIXME: should iterate over all regions? | |
2274 if (car == NULL) { | |
2275 return 0; | |
2276 } | |
2277 return car->free(); | |
2278 } | |
2279 | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2280 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2281 return |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2282 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2283 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2284 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2285 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2286 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) { |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2287 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2288 |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2289 // We assume that if concurrent == true, then the caller is a |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2290 // concurrent thread that was joined the Suspendible Thread |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2291 // Set. If there's ever a cheap way to check this, we should add an |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2292 // assert here. |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2293 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2294 // We have already incremented _total_full_collections at the start |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2295 // of the GC, so total_full_collections() represents how many full |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2296 // collections have been started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2297 unsigned int full_collections_started = total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2298 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2299 // Given that this method is called at the end of a Full GC or of a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2300 // concurrent cycle, and those can be nested (i.e., a Full GC can |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2301 // interrupt a concurrent cycle), the number of full collections |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2302 // completed should be either one (in the case where there was no |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2303 // nesting) or two (when a Full GC interrupted a concurrent cycle) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2304 // behind the number of full collections started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2305 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2306 // This is the case for the inner caller, i.e. a Full GC. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2307 assert(concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2308 (full_collections_started == _full_collections_completed + 1) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2309 (full_collections_started == _full_collections_completed + 2), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2310 err_msg("for inner caller (Full GC): full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2311 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2312 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2313 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2314 // This is the case for the outer caller, i.e. the concurrent cycle. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2315 assert(!concurrent || |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2316 (full_collections_started == _full_collections_completed + 1), |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2317 err_msg("for outer caller (concurrent cycle): " |
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2318 "full_collections_started = %u " |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2319 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2320 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2321 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2322 _full_collections_completed += 1; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2323 |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2324 // We need to clear the "in_progress" flag in the CM thread before |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2325 // we wake up any waiters (especially when ExplicitInvokesConcurrent |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2326 // is set) so that if a waiter requests another System.gc() it doesn't |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2327 // incorrectly see that a marking cyle is still in progress. |
2030
fb712ff22571
7000559: G1: assertion failure !outer || (full_collections_started == _full_collections_completed + 1)
tonyp
parents:
1995
diff
changeset
|
2328 if (concurrent) { |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2329 _cmThread->clear_in_progress(); |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2330 } |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2331 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2332 // This notify_all() will ensure that a thread that called |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2333 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2334 // and it's waiting for a full GC to finish will be woken up. It is |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2335 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2336 FullGCCount_lock->notify_all(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2337 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2338 |
// Performs a collection on behalf of the VM thread. Must run at a
// safepoint, on the VM thread itself; only heap-inspection/heap-dump
// causes are currently supported.
void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false); // don't clear all soft refs
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}
2353 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2354 void G1CollectedHeap::collect(GCCause::Cause cause) { |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2355 // The caller doesn't have the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2356 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2357 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2358 unsigned int gc_count_before; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2359 unsigned int full_gc_count_before; |
342 | 2360 { |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2361 MutexLocker ml(Heap_lock); |
1973 | 2362 |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2363 // Read the GC count while holding the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2364 gc_count_before = SharedHeap::heap()->total_collections(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2365 full_gc_count_before = SharedHeap::heap()->total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2366 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2367 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2368 if (should_do_concurrent_full_gc(cause)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2369 // Schedule an initial-mark evacuation pause that will start a |
1973 | 2370 // concurrent cycle. We're setting word_size to 0 which means that |
2371 // we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2372 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2373 0, /* word_size */ |
2374 true, /* should_initiate_conc_mark */ | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2375 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2376 cause); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2377 VMThread::execute(&op); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2378 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2379 if (cause == GCCause::_gc_locker |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2380 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2381 |
1973 | 2382 // Schedule a standard evacuation pause. We're setting word_size |
2383 // to 0 which means that we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2384 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2385 0, /* word_size */ |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2386 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2387 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2388 cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2389 VMThread::execute(&op); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2390 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2391 // Schedule a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2392 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2393 VMThread::execute(&op); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2394 } |
342 | 2395 } |
2396 } | |
2397 | |
2398 bool G1CollectedHeap::is_in(const void* p) const { | |
2399 if (_g1_committed.contains(p)) { | |
2400 HeapRegion* hr = _hrs->addr_to_region(p); | |
2401 return hr->is_in(p); | |
2402 } else { | |
2403 return _perm_gen->as_gen()->is_in(p); | |
2404 } | |
2405 } | |
2406 | |
// Iteration functions.

// Iterates an OopClosure over all ref-containing fields of objects
// within a HeapRegion.

class IterateOopClosureRegionClosure: public HeapRegionClosure {
  // NOTE(review): _mr is stored but never read below -- doHeapRegion()
  // always iterates the whole region regardless of the MemRegion
  // passed in. Confirm whether restricting iteration to _mr was
  // intended.
  MemRegion _mr;
  OopClosure* _cl;
public:
  IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
    : _mr(mr), _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    // Skip "continues humongous" regions; their contents are covered
    // when the corresponding "starts humongous" region is iterated.
    if (! r->continuesHumongous()) {
      r->oop_iterate(_cl);
    }
    return false;  // never abort the iteration
  }
};
2425 | |
// Applies cl to every ref-containing field in the heap (and in the
// perm gen too, when do_perm is set).
void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
  IterateOopClosureRegionClosure blk(_g1_committed, cl);
  _hrs->iterate(&blk);
  if (do_perm) {
    perm_gen()->oop_iterate(cl);
  }
}
2433 | |
// MemRegion-bounded variant of oop_iterate().
// NOTE(review): mr is handed to IterateOopClosureRegionClosure, which
// currently ignores it and iterates whole regions -- confirm that this
// is the intended semantics for callers passing a sub-range.
void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
  IterateOopClosureRegionClosure blk(mr, cl);
  _hrs->iterate(&blk);
  if (do_perm) {
    perm_gen()->oop_iterate(cl);
  }
}
2441 | |
// Iterates an ObjectClosure over all objects within a HeapRegion.

class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  ObjectClosure* _cl;
public:
  IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    // Skip "continues humongous" regions; their objects are covered
    // when the corresponding "starts humongous" region is iterated.
    if (! r->continuesHumongous()) {
      r->object_iterate(_cl);
    }
    return false;  // never abort the iteration
  }
};
2455 | |
// Applies cl to every object in the heap (and in the perm gen too,
// when do_perm is set).
void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
  IterateObjectClosureRegionClosure blk(cl);
  _hrs->iterate(&blk);
  if (do_perm) {
    perm_gen()->object_iterate(cl);
  }
}
2463 | |
// Not supported by G1: fails loudly rather than silently doing
// nothing.
void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  // FIXME: is this right?
  guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
}
2468 | |
// Calls a SpaceClosure on a HeapRegion.

class SpaceClosureRegionClosure: public HeapRegionClosure {
  SpaceClosure* _cl;
public:
  SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    // Every HeapRegion is a Space, so just forward it to the closure.
    _cl->do_space(r);
    return false;  // never abort the iteration
  }
};
2480 | |
// Applies cl to every region, viewed as a Space.
void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  SpaceClosureRegionClosure blk(cl);
  _hrs->iterate(&blk);
}
2485 | |
// Applies cl to every heap region, in address order.
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  _hrs->iterate(cl);
}
2489 | |
// Applies cl to every heap region, starting the iteration at region r.
void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
                                               HeapRegionClosure* cl) {
  _hrs->iterate_from(r, cl);
}
2494 | |
// Applies cl to every heap region, starting the iteration at index idx.
void
G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  _hrs->iterate_from(idx, cl);
}
2499 | |
// Returns the heap region at the given index in the region sequence.
HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
2501 | |
// Parallel region iteration: each worker scans all regions (starting
// at a worker-specific offset to spread contention) and applies cl
// only to regions it successfully claims with claim_value, so each
// region is processed by exactly one worker.
void
G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                 int worker,
                                                 jint claim_value) {
  const size_t regions = n_regions();
  const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
  // try to spread out the starting points of the workers
  const size_t start_index = regions / worker_num * (size_t) worker;

  // each worker will actually look at all regions
  for (size_t count = 0; count < regions; ++count) {
    const size_t index = (start_index + count) % regions;
    assert(0 <= index && index < regions, "sanity");
    HeapRegion* r = region_at(index);
    // we'll ignore "continues humongous" regions (we'll process them
    // when we come across their corresponding "start humongous"
    // region) and regions already claimed
    if (r->claim_value() == claim_value || r->continuesHumongous()) {
      continue;
    }
    // OK, try to claim it
    if (r->claimHeapRegion(claim_value)) {
      // success!
      assert(!r->continuesHumongous(), "sanity");
      if (r->startsHumongous()) {
        // If the region is "starts humongous" we'll iterate over its
        // "continues humongous" first; in fact we'll do them
        // first. The order is important. In one case, calling the
        // closure on the "starts humongous" region might de-allocate
        // and clear all its "continues humongous" regions and, as a
        // result, we might end up processing them twice. So, we'll do
        // them first (notice: most closures will ignore them anyway) and
        // then we'll do the "starts humongous" region.
        for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
          HeapRegion* chr = region_at(ch_index);

          // if the region has already been claimed or it's not
          // "continues humongous" we're done
          if (chr->claim_value() == claim_value ||
              !chr->continuesHumongous()) {
            break;
          }

          // No one should have claimed it directly. We can infer
          // this, given that we claimed its "starts humongous" region.
          assert(chr->claim_value() != claim_value, "sanity");
          assert(chr->humongous_start_region() == r, "sanity");

          if (chr->claimHeapRegion(claim_value)) {
            // we should always be able to claim it; no one else should
            // be trying to claim this region

            bool res2 = cl->doHeapRegion(chr);
            assert(!res2, "Should not abort");

            // Right now, this holds (i.e., no closure that actually
            // does something with "continues humongous" regions
            // clears them). We might have to weaken it in the future,
            // but let's leave these two asserts here for extra safety.
            assert(chr->continuesHumongous(), "should still be the case");
            assert(chr->humongous_start_region() == r, "sanity");
          } else {
            guarantee(false, "we should not reach here");
          }
        }
      }

      assert(!r->continuesHumongous(), "sanity");
      bool res = cl->doHeapRegion(r);
      assert(!res, "Should not abort");
    }
  }
}
2575 | |
// Resets every region's claim value back to
// HeapRegion::InitialClaimValue so a later parallel iteration can
// claim regions afresh.
class ResetClaimValuesClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->set_claim_value(HeapRegion::InitialClaimValue);
    return false;  // never abort the iteration
  }
};
2583 | |
// Resets the claim values of all heap regions (see
// ResetClaimValuesClosure above).
void
G1CollectedHeap::reset_heap_region_claim_values() {
  ResetClaimValuesClosure blk;
  heap_region_iterate(&blk);
}
2589 | |
#ifdef ASSERT
// This checks whether all regions in the heap have the correct claim
// value. I also piggy-backed on this a check to ensure that the
// humongous_start_region() information on "continues humongous"
// regions is correct.

class CheckClaimValuesClosure : public HeapRegionClosure {
private:
  jint _claim_value;
  size_t _failures;        // number of regions that failed verification
  HeapRegion* _sh_region;  // most recent "starts humongous" region seen
public:
  CheckClaimValuesClosure(jint claim_value) :
    _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->claim_value() != _claim_value) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                             "claim value = %d, should be %d",
                             r->bottom(), r->end(), r->claim_value(),
                             _claim_value);
      ++_failures;
    }
    // Track the current humongous run: a "continues humongous" region
    // must point back at the "starts humongous" region we saw last
    // (iteration is in address order).
    if (!r->isHumongous()) {
      _sh_region = NULL;
    } else if (r->startsHumongous()) {
      _sh_region = r;
    } else if (r->continuesHumongous()) {
      if (r->humongous_start_region() != _sh_region) {
        gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                               "HS = "PTR_FORMAT", should be "PTR_FORMAT,
                               r->bottom(), r->end(),
                               r->humongous_start_region(),
                               _sh_region);
        ++_failures;
      }
    }
    return false;  // never abort the iteration
  }
  size_t failures() {
    return _failures;
  }
};

// Returns true iff every region in the heap has the given claim value.
bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  CheckClaimValuesClosure cl(claim_value);
  heap_region_iterate(&cl);
  return cl.failures() == 0;
}
#endif // ASSERT
342 | 2639 |
2640 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2641 HeapRegion* r = g1_policy()->collection_set(); | |
2642 while (r != NULL) { | |
2643 HeapRegion* next = r->next_in_collection_set(); | |
2644 if (cl->doHeapRegion(r)) { | |
2645 cl->incomplete(); | |
2646 return; | |
2647 } | |
2648 r = next; | |
2649 } | |
2650 } | |
2651 | |
// Applies cl to every region in the collection set, starting at r and
// wrapping around to the beginning of the CSet, stopping just before r.
void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                  HeapRegionClosure *cl) {
  if (r == NULL) {
    // The CSet is empty so there's nothing to do.
    return;
  }

  assert(r->in_collection_set(),
         "Start region must be a member of the collection set.");
  HeapRegion* cur = r;
  while (cur != NULL) {
    HeapRegion* next = cur->next_in_collection_set();
    // NOTE(review): the "&& false" makes this abort path dead code, so
    // unlike collection_set_iterate() a true return from doHeapRegion()
    // is ignored here. Presumably deliberate -- confirm before removing.
    if (cl->doHeapRegion(cur) && false) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
  // Wrap around to the start of the collection set and continue up to
  // (but not including) the region we started from.
  cur = g1_policy()->collection_set();
  while (cur != r) {
    HeapRegion* next = cur->next_in_collection_set();
    if (cl->doHeapRegion(cur) && false) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
}
2680 | |
2681 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
2682 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
2683 } | |
2684 | |
2685 | |
2686 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2687 Space* res = heap_region_containing(addr); | |
2688 if (res == NULL) | |
2689 res = perm_gen()->space_containing(addr); | |
2690 return res; | |
2691 } | |
2692 | |
2693 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2694 Space* sp = space_containing(addr); | |
2695 if (sp != NULL) { | |
2696 return sp->block_start(addr); | |
2697 } | |
2698 return NULL; | |
2699 } | |
2700 | |
// Returns the size of the block containing addr; addr must be inside
// the heap (or perm gen).
size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  Space* sp = space_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}
2706 | |
// Returns whether the block containing addr holds an object.
// NOTE(review): unlike block_start(), there is no NULL check on sp
// here -- presumably callers only pass in-heap addresses; confirm.
bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = space_containing(addr);
  return sp->block_is_obj(addr);
}
2711 | |
// G1 supports thread-local allocation buffers.
bool G1CollectedHeap::supports_tlab_allocation() const {
  return true;
}
2715 | |
// TLAB capacity is reported as one region's worth of bytes.
size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  return HeapRegion::GrainBytes;
}
2719 | |
2720 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
2721 // Return the remaining space in the cur alloc region, but not less than | |
2722 // the min TLAB size. | |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2723 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2724 // Also, this value can be at most the humongous object threshold, |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2725 // since we can't allow tlabs to grow big enough to accomodate |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2726 // humongous objects. |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2727 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2728 // We need to store the cur alloc region locally, since it might change |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2729 // between when we test for NULL and when we use it later. |
342 | 2730 ContiguousSpace* cur_alloc_space = _cur_alloc_region; |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2731 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2732 |
342 | 2733 if (cur_alloc_space == NULL) { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2734 return max_tlab_size; |
342 | 2735 } else { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2736 return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize), |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2737 max_tlab_size); |
342 | 2738 } |
2739 } | |
2740 | |
// Size limit (in words) for large type arrays: one region's worth.
size_t G1CollectedHeap::large_typearray_limit() {
  // FIXME
  return HeapRegion::GrainBytes/HeapWordSize;
}
2745 | |
// Reports the full reserved heap size (so Runtime.maxMemory() reflects
// the reserved, not just committed, space).
size_t G1CollectedHeap::max_capacity() const {
  return g1_reserved_obj_bytes();
}
2749 | |
// Not yet implemented for G1; always reports 0.
jlong G1CollectedHeap::millis_since_last_gc() {
  // assert(false, "NYI");
  return 0;
}
2754 | |
// Puts the heap into a verifiable state: retires TLABs (when it is
// safe to do so) and lets the remembered set prepare itself.
void G1CollectedHeap::prepare_for_verify() {
  // Retiring TLABs is only safe at a safepoint, or when TLABs are not
  // in use at all.
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ensure_parsability(false);
  }
  g1_rem_set()->prepare_for_verify();
}
2761 | |
2762 class VerifyLivenessOopClosure: public OopClosure { | |
2763 G1CollectedHeap* g1h; | |
2764 public: | |
2765 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
2766 g1h = _g1h; | |
2767 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2768 void do_oop(narrowOop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2769 void do_oop( oop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2770 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2771 template <class T> void do_oop_work(T *p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2772 oop obj = oopDesc::load_decode_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2773 guarantee(obj == NULL || !g1h->is_obj_dead(obj), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2774 "Dead object referenced by a not dead object"); |
342 | 2775 } |
2776 }; | |
2777 | |
// Verification closure: checks every live object in a region (its
// references must point at live objects) and accumulates the region's
// live byte count.
class VerifyObjsInRegionClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  size_t _live_bytes;   // running total of live bytes seen in _hr
  HeapRegion *_hr;
  bool _use_prev_marking;
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
    : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
    _g1h = G1CollectedHeap::heap();
  }
  void do_object(oop o) {
    VerifyLivenessOopClosure isLive(_g1h);
    assert(o != NULL, "Huh?");
    if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
      // Live object: everything it references must be live too.
      o->oop_iterate(&isLive);
      // Objects allocated since the previous marking are implicitly
      // live and are not counted against the marked live bytes.
      if (!_hr->obj_allocated_since_prev_marking(o)) {
        size_t obj_size = o->size();    // Make sure we don't overflow
        _live_bytes += (obj_size * HeapWordSize);
      }
    }
  }
  size_t live_bytes() { return _live_bytes; }
};
2804 | |
2805 class PrintObjsInRegionClosure : public ObjectClosure { | |
2806 HeapRegion *_hr; | |
2807 G1CollectedHeap *_g1; | |
2808 public: | |
2809 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2810 _g1 = G1CollectedHeap::heap(); | |
2811 }; | |
2812 | |
2813 void do_object(oop o) { | |
2814 if (o != NULL) { | |
2815 HeapWord *start = (HeapWord *) o; | |
2816 size_t word_sz = o->size(); | |
2817 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2818 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2819 (void*) o, word_sz, | |
2820 _g1->isMarkedPrev(o), | |
2821 _g1->isMarkedNext(o), | |
2822 _hr->obj_allocated_since_prev_marking(o)); | |
2823 HeapWord *end = start + word_sz; | |
2824 HeapWord *cur; | |
2825 int *val; | |
2826 for (cur = start; cur < end; cur++) { | |
2827 val = (int *) cur; | |
2828 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2829 } | |
2830 } | |
2831 } | |
2832 }; | |
2833 | |
2834 class VerifyRegionClosure: public HeapRegionClosure { | |
811 | 2835 private: |
342 | 2836 bool _allow_dirty; |
390 | 2837 bool _par; |
811 | 2838 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2839 bool _failures; |
811 | 2840 public: |
2841 // use_prev_marking == true -> use "prev" marking information, | |
2842 // use_prev_marking == false -> use "next" marking information | |
2843 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2844 : _allow_dirty(allow_dirty), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2845 _par(par), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2846 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2847 _failures(false) {} |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2848 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2849 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2850 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2851 } |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2852 |
342 | 2853 bool doHeapRegion(HeapRegion* r) { |
390 | 2854 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2855 "Should be unclaimed at verify points."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2856 if (!r->continuesHumongous()) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2857 bool failures = false; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2858 r->verify(_allow_dirty, _use_prev_marking, &failures); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2859 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2860 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2861 } else { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2862 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2863 r->object_iterate(¬_dead_yet_cl); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2864 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2865 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2866 "max_live_bytes "SIZE_FORMAT" " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2867 "< calculated "SIZE_FORMAT, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2868 r->bottom(), r->end(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2869 r->max_live_bytes(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2870 not_dead_yet_cl.live_bytes()); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2871 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2872 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2873 } |
342 | 2874 } |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2875 return false; // stop the region iteration if we hit a failure |
342 | 2876 } |
2877 }; | |
2878 | |
2879 class VerifyRootsClosure: public OopsInGenClosure { | |
2880 private: | |
2881 G1CollectedHeap* _g1h; | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2882 bool _use_prev_marking; |
342 | 2883 bool _failures; |
2884 public: | |
811 | 2885 // use_prev_marking == true -> use "prev" marking information, |
2886 // use_prev_marking == false -> use "next" marking information | |
2887 VerifyRootsClosure(bool use_prev_marking) : | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2888 _g1h(G1CollectedHeap::heap()), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2889 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2890 _failures(false) { } |
342 | 2891 |
2892 bool failures() { return _failures; } | |
2893 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2894 template <class T> void do_oop_nv(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2895 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2896 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2897 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
811 | 2898 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { |
342 | 2899 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2900 "points to dead obj "PTR_FORMAT, p, (void*) obj); |
342 | 2901 obj->print_on(gclog_or_tty); |
2902 _failures = true; | |
2903 } | |
2904 } | |
2905 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2906 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2907 void do_oop(oop* p) { do_oop_nv(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2908 void do_oop(narrowOop* p) { do_oop_nv(p); } |
342 | 2909 }; |
2910 | |
390 | 2911 // This is the task used for parallel heap verification. |
2912 | |
2913 class G1ParVerifyTask: public AbstractGangTask { | |
2914 private: | |
2915 G1CollectedHeap* _g1h; | |
2916 bool _allow_dirty; | |
811 | 2917 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2918 bool _failures; |
390 | 2919 |
2920 public: | |
811 | 2921 // use_prev_marking == true -> use "prev" marking information, |
2922 // use_prev_marking == false -> use "next" marking information | |
2923 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, | |
2924 bool use_prev_marking) : | |
390 | 2925 AbstractGangTask("Parallel verify task"), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2926 _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2927 _allow_dirty(allow_dirty), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2928 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2929 _failures(false) { } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2930 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2931 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2932 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2933 } |
390 | 2934 |
2935 void work(int worker_i) { | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2936 HandleMark hm; |
811 | 2937 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); |
390 | 2938 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
2939 HeapRegion::ParVerifyClaimValue); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2940 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2941 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2942 } |
390 | 2943 } |
2944 }; | |
2945 | |
342 | 2946 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
811 | 2947 verify(allow_dirty, silent, /* use_prev_marking */ true); |
2948 } | |
2949 | |
2950 void G1CollectedHeap::verify(bool allow_dirty, | |
2951 bool silent, | |
2952 bool use_prev_marking) { | |
342 | 2953 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
2954 if (!silent) { gclog_or_tty->print("roots "); } | |
811 | 2955 VerifyRootsClosure rootsCl(use_prev_marking); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2956 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2957 process_strong_roots(true, // activate StrongRootsScope |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2958 false, |
342 | 2959 SharedHeap::SO_AllClasses, |
2960 &rootsCl, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2961 &blobsCl, |
342 | 2962 &rootsCl); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2963 bool failures = rootsCl.failures(); |
342 | 2964 rem_set()->invalidate(perm_gen()->used_region(), false); |
2152 | 2965 if (!silent) { gclog_or_tty->print("HeapRegionSets "); } |
2966 verify_region_sets(); | |
2967 if (!silent) { gclog_or_tty->print("HeapRegions "); } | |
390 | 2968 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2969 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2970 "sanity check"); | |
2971 | |
811 | 2972 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
390 | 2973 int n_workers = workers()->total_workers(); |
2974 set_par_threads(n_workers); | |
2975 workers()->run_task(&task); | |
2976 set_par_threads(0); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2977 if (task.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2978 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2979 } |
390 | 2980 |
2981 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2982 "sanity check"); | |
2983 | |
2984 reset_heap_region_claim_values(); | |
2985 | |
2986 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2987 "sanity check"); | |
2988 } else { | |
811 | 2989 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
390 | 2990 _hrs->iterate(&blk); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2991 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2992 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2993 } |
390 | 2994 } |
2152 | 2995 if (!silent) gclog_or_tty->print("RemSet "); |
342 | 2996 rem_set()->verify(); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2997 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2998 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2999 gclog_or_tty->print_cr("Heap:"); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
3000 print_on(gclog_or_tty, true /* extended */); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
3001 gclog_or_tty->print_cr(""); |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
3002 #ifndef PRODUCT |
1044 | 3003 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
1388 | 3004 concurrent_mark()->print_reachable("at-verification-failure", |
3005 use_prev_marking, false /* all */); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
3006 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
3007 #endif |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
3008 gclog_or_tty->flush(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
3009 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
3010 guarantee(!failures, "there should not have been any failures"); |
342 | 3011 } else { |
3012 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
3013 } | |
3014 } | |
3015 | |
3016 class PrintRegionClosure: public HeapRegionClosure { | |
3017 outputStream* _st; | |
3018 public: | |
3019 PrintRegionClosure(outputStream* st) : _st(st) {} | |
3020 bool doHeapRegion(HeapRegion* r) { | |
3021 r->print_on(_st); | |
3022 return false; | |
3023 } | |
3024 }; | |
3025 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3026 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 3027 |
3028 void G1CollectedHeap::print_on(outputStream* st) const { | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3029 print_on(st, PrintHeapAtGCExtended); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3030 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3031 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3032 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3033 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3034 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
3035 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3036 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3037 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3038 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3039 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3040 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3041 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3042 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3043 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3044 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3045 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3046 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3047 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3048 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3049 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3050 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3051 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
3052 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3053 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3054 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3055 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3056 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3057 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 3058 PrintRegionClosure blk(st); |
3059 _hrs->iterate(&blk); | |
3060 } | |
3061 | |
3062 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
3063 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1019 | 3064 workers()->print_worker_threads_on(st); |
3065 } | |
3066 _cmThread->print_on(st); | |
342 | 3067 st->cr(); |
1019 | 3068 _cm->print_worker_threads_on(st); |
3069 _cg1r->print_worker_threads_on(st); | |
342 | 3070 st->cr(); |
3071 } | |
3072 | |
3073 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
3074 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 3075 workers()->threads_do(tc); |
3076 } | |
3077 tc->do_thread(_cmThread); | |
794 | 3078 _cg1r->threads_do(tc); |
342 | 3079 } |
3080 | |
3081 void G1CollectedHeap::print_tracing_info() const { | |
3082 // We'll overload this to mean "trace GC pause statistics." | |
3083 if (TraceGen0Time || TraceGen1Time) { | |
3084 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
3085 // to that. | |
3086 g1_policy()->print_tracing_info(); | |
3087 } | |
751 | 3088 if (G1SummarizeRSetStats) { |
342 | 3089 g1_rem_set()->print_summary_info(); |
3090 } | |
1282 | 3091 if (G1SummarizeConcMark) { |
342 | 3092 concurrent_mark()->print_summary_info(); |
3093 } | |
3094 g1_policy()->print_yg_surv_rate_info(); | |
3095 SpecializationStats::print(); | |
3096 } | |
3097 | |
3098 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
3099 HeapRegion* hr = heap_region_containing(addr); | |
3100 if (hr == NULL) { | |
3101 return 0; | |
3102 } else { | |
3103 return 1; | |
3104 } | |
3105 } | |
3106 | |
3107 G1CollectedHeap* G1CollectedHeap::heap() { | |
3108 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
3109 "not a garbage-first heap"); | |
3110 return _g1h; | |
3111 } | |
3112 | |
3113 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
3114 // always_do_update_barrier = false; |
342 | 3115 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
3116 // Call allocation profiler | |
3117 AllocationProfiler::iterate_since_last_gc(); | |
3118 // Fill TLAB's and such | |
3119 ensure_parsability(true); | |
3120 } | |
3121 | |
3122 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
3123 // FIXME: what is this about? | |
3124 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
3125 // is set. | |
3126 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
3127 "derived pointer present")); | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
3128 // always_do_update_barrier = true; |
342 | 3129 } |
3130 | |
1973 | 3131 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, |
3132 unsigned int gc_count_before, | |
3133 bool* succeeded) { | |
3134 assert_heap_not_locked_and_not_at_safepoint(); | |
342 | 3135 g1_policy()->record_stop_world_start(); |
1973 | 3136 VM_G1IncCollectionPause op(gc_count_before, |
3137 word_size, | |
3138 false, /* should_initiate_conc_mark */ | |
3139 g1_policy()->max_pause_time_ms(), | |
3140 GCCause::_g1_inc_collection_pause); | |
3141 VMThread::execute(&op); | |
3142 | |
3143 HeapWord* result = op.result(); | |
3144 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded(); | |
3145 assert(result == NULL || ret_succeeded, | |
3146 "the result should be NULL if the VM did not succeed"); | |
3147 *succeeded = ret_succeeded; | |
3148 | |
3149 assert_heap_not_locked(); | |
3150 return result; | |
342 | 3151 } |
3152 | |
3153 void | |
3154 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3155 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3156 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3157 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3158 CGC_lock->notify(); |
342 | 3159 } |
3160 } | |
3161 | |
3162 class VerifyMarkedObjsClosure: public ObjectClosure { | |
3163 G1CollectedHeap* _g1h; | |
3164 public: | |
3165 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
3166 void do_object(oop obj) { | |
3167 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
3168 "markandsweep mark should agree with concurrent deadness"); | |
3169 } | |
3170 }; | |
3171 | |
3172 void | |
3173 G1CollectedHeap::checkConcurrentMark() { | |
3174 VerifyMarkedObjsClosure verifycl(this); | |
3175 // MutexLockerEx x(getMarkBitMapLock(), | |
3176 // Mutex::_no_safepoint_check_flag); | |
678 | 3177 object_iterate(&verifycl, false); |
342 | 3178 } |
3179 | |
3180 void G1CollectedHeap::do_sync_mark() { | |
3181 _cm->checkpointRootsInitial(); | |
3182 _cm->markFromRoots(); | |
3183 _cm->checkpointRootsFinal(false); | |
3184 } | |
3185 | |
3186 // <NEW PREDICTION> | |
3187 | |
3188 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
3189 bool young) { | |
3190 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
3191 } | |
3192 | |
3193 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
3194 predicted_time_ms) { | |
3195 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
3196 } | |
3197 | |
3198 size_t G1CollectedHeap::pending_card_num() { | |
3199 size_t extra_cards = 0; | |
3200 JavaThread *curr = Threads::first(); | |
3201 while (curr != NULL) { | |
3202 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
3203 extra_cards += dcq.size(); | |
3204 curr = curr->next(); | |
3205 } | |
3206 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3207 size_t buffer_size = dcqs.buffer_size(); | |
3208 size_t buffer_num = dcqs.completed_buffers_num(); | |
3209 return buffer_size * buffer_num + extra_cards; | |
3210 } | |
3211 | |
3212 size_t G1CollectedHeap::max_pending_card_num() { | |
3213 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3214 size_t buffer_size = dcqs.buffer_size(); | |
3215 size_t buffer_num = dcqs.completed_buffers_num(); | |
3216 int thread_num = Threads::number_of_threads(); | |
3217 return (buffer_num + thread_num) * buffer_size; | |
3218 } | |
3219 | |
3220 size_t G1CollectedHeap::cards_scanned() { | |
1861 | 3221 return g1_rem_set()->cardsScanned(); |
342 | 3222 } |
3223 | |
3224 void | |
3225 G1CollectedHeap::setup_surviving_young_words() { | |
3226 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
3227 size_t array_length = g1_policy()->young_cset_length(); | |
3228 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3229 if (_surviving_young_words == NULL) { | |
3230 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
3231 "Not enough space for young surv words summary."); | |
3232 } | |
3233 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3234 #ifdef ASSERT |
342 | 3235 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3236 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3237 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3238 #endif // !ASSERT |
342 | 3239 } |
3240 | |
3241 void | |
3242 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
3243 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3244 size_t array_length = g1_policy()->young_cset_length(); | |
3245 for (size_t i = 0; i < array_length; ++i) | |
3246 _surviving_young_words[i] += surv_young_words[i]; | |
3247 } | |
3248 | |
3249 void | |
3250 G1CollectedHeap::cleanup_surviving_young_words() { | |
3251 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
3252 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
3253 _surviving_young_words = NULL; | |
3254 } | |
3255 | |
3256 // </NEW PREDICTION> | |
3257 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3258 struct PrepareForRSScanningClosure : public HeapRegionClosure { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3259 bool doHeapRegion(HeapRegion *r) { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3260 r->rem_set()->set_iter_claimed(0); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3261 return false; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3262 } |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3263 }; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3264 |
1709 | 3265 #if TASKQUEUE_STATS |
3266 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { | |
3267 st->print_raw_cr("GC Task Stats"); | |
3268 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); | |
3269 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); | |
3270 } | |
3271 | |
3272 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { | |
3273 print_taskqueue_stats_hdr(st); | |
3274 | |
3275 TaskQueueStats totals; | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3276 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3277 for (int i = 0; i < n; ++i) { |
3278 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); | |
3279 totals += task_queue(i)->stats; | |
3280 } | |
3281 st->print_raw("tot "); totals.print(st); st->cr(); | |
3282 | |
3283 DEBUG_ONLY(totals.verify()); | |
3284 } | |
3285 | |
3286 void G1CollectedHeap::reset_taskqueue_stats() { | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3287 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3288 for (int i = 0; i < n; ++i) { |
3289 task_queue(i)->stats.reset(); | |
3290 } | |
3291 } | |
3292 #endif // TASKQUEUE_STATS | |
3293 | |
1973 | 3294 bool |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3295 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
2152 | 3296 assert_at_safepoint(true /* should_be_vm_thread */); |
3297 guarantee(!is_gc_active(), "collection is not reentrant"); | |
3298 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3299 if (GC_locker::check_active_before_gc()) { |
1973 | 3300 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3301 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3302 |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
2039
diff
changeset
|
3303 SvcGCMarker sgcm(SvcGCMarker::MINOR); |
2039
7c5250dbd584
6896624: G1: hotspot:::gc and hotspot:::mem-pool-gc probes are not fired
tonyp
parents:
2038
diff
changeset
|
3304 ResourceMark rm; |
7c5250dbd584
6896624: G1: hotspot:::gc and hotspot:::mem-pool-gc probes are not fired
tonyp
parents:
2038
diff
changeset
|
3305 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3306 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3307 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3308 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3309 |
2152 | 3310 verify_region_sets_optional(); |
3311 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3312 { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3313 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3314 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3315 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3316 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3317 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3318 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3319 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3320 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3321 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3322 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3323 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3324 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3325 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3326 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3327 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3328 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3329 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3330 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3331 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3332 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3333 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3334 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3335 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3336 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3337 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3338 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3339 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3340 TraceMemoryManagerStats tms(false /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3341 |
2152 | 3342 // If there are any free regions available on the secondary_free_list |
3343 // make sure we append them to the free_list. However, we don't | |
3344 // have to wait for the rest of the cleanup operation to | |
3345 // finish. If it's still going on that's OK. If we run out of | |
3346 // regions, the region allocation code will check the | |
3347 // secondary_free_list and potentially wait if more free regions | |
3348 // are coming (see new_region_try_secondary_free_list()). | |
3349 if (!G1StressConcRegionFreeing) { | |
3350 append_secondary_free_list_if_not_empty(); | |
3351 } | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3352 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3353 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3354 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3355 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3356 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3357 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3358 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3359 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3360 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3361 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3362 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3363 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3364 increment_total_collections(false /* full gc */); |
342 | 3365 |
3366 #if G1_REM_SET_LOGGING | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3367 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3368 print(); |
342 | 3369 #endif |
3370 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3371 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3372 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3373 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3374 gclog_or_tty->print(" VerifyBeforeGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3375 Universe::verify(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3376 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3377 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3378 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3379 |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3380 // Please see comment in G1CollectedHeap::ref_processing_init() |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3381 // to see how reference processing currently works in G1. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3382 // |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3383 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3384 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3385 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3386 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3387 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3388 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3389 // of the collection set!). |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3390 abandon_cur_alloc_region(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3391 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3392 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3393 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3394 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3395 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3396 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3397 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3398 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3399 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3400 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3401 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3402 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3403 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3404 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3405 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3406 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3407 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3408 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3409 #endif // YOUNG_LIST_VERBOSE |
342 | 3410 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3411 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3412 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3413 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3414 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3415 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3416 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3417 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3418 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3419 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3420 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3421 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3422 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3423 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3424 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3425 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3426 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3427 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3428 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3429 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3430 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3431 if (mark_in_progress()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3432 concurrent_mark()->newCSet(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3433 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3434 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3435 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3436 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3437 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3438 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3439 |
1707 | 3440 g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3441 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3442 // Nothing to do if we were unable to choose a collection set. |
342 | 3443 #if G1_REM_SET_LOGGING |
1707 | 3444 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
3445 print(); | |
342 | 3446 #endif |
1707 | 3447 PrepareForRSScanningClosure prepare_for_rs_scan; |
3448 collection_set_iterate(&prepare_for_rs_scan); | |
3449 | |
3450 setup_surviving_young_words(); | |
3451 | |
3452 // Set up the gc allocation regions. | |
3453 get_gc_alloc_regions(); | |
3454 | |
3455 // Actually do the work... | |
3456 evacuate_collection_set(); | |
3457 | |
3458 free_collection_set(g1_policy()->collection_set()); | |
3459 g1_policy()->clear_collection_set(); | |
3460 | |
3461 cleanup_surviving_young_words(); | |
3462 | |
3463 // Start a new incremental collection set for the next pause. | |
3464 g1_policy()->start_incremental_cset_building(); | |
3465 | |
3466 // Clear the _cset_fast_test bitmap in anticipation of adding | |
3467 // regions to the incremental collection set for the next | |
3468 // evacuation pause. | |
3469 clear_cset_fast_test(); | |
3470 | |
3471 if (g1_policy()->in_young_gc_mode()) { | |
3472 _young_list->reset_sampled_info(); | |
3473 | |
3474 // Don't check the whole heap at this point as the | |
3475 // GC alloc regions from this pause have been tagged | |
3476 // as survivors and moved on to the survivor list. | |
3477 // Survivor regions will fail the !is_young() check. | |
3478 assert(check_young_list_empty(false /* check_heap */), | |
3479 "young list should be empty"); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3480 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3481 #if YOUNG_LIST_VERBOSE |
1707 | 3482 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
3483 _young_list->print(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3484 #endif // YOUNG_LIST_VERBOSE |
342 | 3485 |
1707 | 3486 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3487 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3488 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3489 |
1707 | 3490 _young_list->reset_auxilary_lists(); |
342 | 3491 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3492 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3493 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3494 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3495 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3496 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3497 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3498 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3499 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3500 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3501 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3502 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3503 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3504 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3505 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3506 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3507 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3508 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3509 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3510 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3511 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3512 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3513 } |
342 | 3514 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3515 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3516 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3517 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3518 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3519 #endif // YOUNG_LIST_VERBOSE |
342 | 3520 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3521 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3522 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3523 g1_policy()->record_pause_time_ms(pause_time_ms); |
1707 | 3524 g1_policy()->record_collection_pause_end(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3525 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3526 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3527 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3528 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3529 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3530 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3531 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3532 Universe::verify(false); |
342 | 3533 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3534 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3535 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3536 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3537 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3538 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3539 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3540 size_t bytes_before = capacity(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3541 expand(expand_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3542 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3543 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3544 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3545 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3546 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3547 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3548 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3549 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3550 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3551 #endif |
342 | 3552 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3553 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3554 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3555 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3556 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3557 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3558 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3559 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3560 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3561 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3562 |
2152 | 3563 verify_region_sets_optional(); |
3564 | |
1709 | 3565 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
3566 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | |
3567 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3568 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3569 Universe::print_heap_after_gc(); |
342 | 3570 } |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3571 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3572 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3573 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3574 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3575 } |
1973 | 3576 |
3577 return true; | |
342 | 3578 } |
3579 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3580 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3581 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3582 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3583 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3584 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3585 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3586 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3587 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3588 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3589 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3590 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3591 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3592 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3593 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3594 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3595 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3596 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3597 |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3598 |
342 | 3599 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
3600 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 3601 // make sure we don't call set_gc_alloc_region() multiple times on |
3602 // the same region | |
3603 assert(r == NULL || !r->is_gc_alloc_region(), | |
3604 "shouldn't already be a GC alloc region"); | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3605 assert(r == NULL || !r->isHumongous(), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3606 "humongous regions shouldn't be used as GC alloc regions"); |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3607 |
342 | 3608 HeapWord* original_top = NULL; |
3609 if (r != NULL) | |
3610 original_top = r->top(); | |
3611 | |
3612 // We will want to record the used space in r as being there before gc. | |
3613 // One we install it as a GC alloc region it's eligible for allocation. | |
3614 // So record it now and use it later. | |
3615 size_t r_used = 0; | |
3616 if (r != NULL) { | |
3617 r_used = r->used(); | |
3618 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
3619 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 3620 // need to take the lock to guard against two threads calling |
3621 // get_gc_alloc_region concurrently (very unlikely but...) | |
3622 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3623 r->save_marks(); | |
3624 } | |
3625 } | |
3626 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
3627 _gc_alloc_regions[purpose] = r; | |
3628 if (old_alloc_region != NULL) { | |
3629 // Replace aliases too. | |
3630 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3631 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
3632 _gc_alloc_regions[ap] = r; | |
3633 } | |
3634 } | |
3635 } | |
3636 if (r != NULL) { | |
3637 push_gc_alloc_region(r); | |
3638 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
3639 // We are using a region as a GC alloc region after it has been used | |
3640 // as a mutator allocation region during the current marking cycle. | |
3641 // The mutator-allocated objects are currently implicitly marked, but | |
3642 // when we move hr->next_top_at_mark_start() forward at the the end | |
3643 // of the GC pause, they won't be. We therefore mark all objects in | |
3644 // the "gap". We do this object-by-object, since marking densely | |
3645 // does not currently work right with marking bitmap iteration. This | |
3646 // means we rely on TLAB filling at the start of pauses, and no | |
3647 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
3648 // to fix the marking bitmap iteration. | |
3649 HeapWord* curhw = r->next_top_at_mark_start(); | |
3650 HeapWord* t = original_top; | |
3651 | |
3652 while (curhw < t) { | |
3653 oop cur = (oop)curhw; | |
3654 // We'll assume parallel for generality. This is rare code. | |
3655 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
3656 curhw = curhw + cur->size(); | |
3657 } | |
3658 assert(curhw == t, "Should have parsed correctly."); | |
3659 } | |
3660 if (G1PolicyVerbose > 1) { | |
3661 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
3662 "for survivors:", r->bottom(), original_top, r->end()); | |
3663 r->print(); | |
3664 } | |
3665 g1_policy()->record_before_bytes(r_used); | |
3666 } | |
3667 } | |
3668 | |
3669 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
3670 assert(Thread::current()->is_VM_thread() || | |
2152 | 3671 FreeList_lock->owned_by_self(), "Precondition"); |
342 | 3672 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), |
3673 "Precondition."); | |
3674 hr->set_is_gc_alloc_region(true); | |
3675 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
3676 _gc_alloc_region_list = hr; | |
3677 } | |
3678 | |
#ifdef G1_DEBUG
// Debug-only closure: reports any heap region that is still tagged as a
// GC alloc region (outside a GC there should be none). Always iterates
// the whole heap (returns false to continue).
class FindGCAllocRegion: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_gc_alloc_region()) {
      return false;
    }
    gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
                           r->hrs_index(), r->bottom());
    return false;
  }
};
#endif // G1_DEBUG
3691 | |
3692 void G1CollectedHeap::forget_alloc_region_list() { | |
2152 | 3693 assert_at_safepoint(true /* should_be_vm_thread */); |
342 | 3694 while (_gc_alloc_region_list != NULL) { |
3695 HeapRegion* r = _gc_alloc_region_list; | |
3696 assert(r->is_gc_alloc_region(), "Invariant."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3697 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3698 // newly allocated data in order to be able to apply deferred updates |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3699 // before the GC is done for verification purposes (i.e to allow |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3700 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3701 // collection. |
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
3702 r->ContiguousSpace::set_saved_mark(); |
342 | 3703 _gc_alloc_region_list = r->next_gc_alloc_region(); |
3704 r->set_next_gc_alloc_region(NULL); | |
3705 r->set_is_gc_alloc_region(false); | |
545 | 3706 if (r->is_survivor()) { |
3707 if (r->is_empty()) { | |
3708 r->set_not_young(); | |
3709 } else { | |
3710 _young_list->add_survivor_region(r); | |
3711 } | |
3712 } | |
342 | 3713 } |
3714 #ifdef G1_DEBUG | |
3715 FindGCAllocRegion fa; | |
3716 heap_region_iterate(&fa); | |
3717 #endif // G1_DEBUG | |
3718 } | |
3719 | |
3720 | |
3721 bool G1CollectedHeap::check_gc_alloc_regions() { | |
3722 // TODO: allocation regions check | |
3723 return true; | |
3724 } | |
3725 | |
3726 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 3727 // First, let's check that the GC alloc region list is empty (it should) |
3728 assert(_gc_alloc_region_list == NULL, "invariant"); | |
3729 | |
342 | 3730 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 3731 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3732 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
636 | 3733 |
342 | 3734 // Create new GC alloc regions. |
636 | 3735 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
3736 _retained_gc_alloc_regions[ap] = NULL; | |
3737 | |
3738 if (alloc_region != NULL) { | |
3739 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
3740 | |
3741 // let's make sure that the GC alloc region is not tagged as such | |
3742 // outside a GC operation | |
3743 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
3744 | |
3745 if (alloc_region->in_collection_set() || | |
3746 alloc_region->top() == alloc_region->end() || | |
1360
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3747 alloc_region->top() == alloc_region->bottom() || |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3748 alloc_region->isHumongous()) { |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3749 // we will discard the current GC alloc region if |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3750 // * it's in the collection set (it can happen!), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3751 // * it's already full (no point in using it), |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3752 // * it's empty (this means that it was emptied during |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3753 // a cleanup and it should be on the free list now), or |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3754 // * it's humongous (this means that it was emptied |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3755 // during a cleanup and was added to the free list, but |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3756 // has been subseqently used to allocate a humongous |
bda703475ded
6940894: G1: assert(new_obj != 0 || ... "should be forwarded") for compaction tests
johnc
parents:
1359
diff
changeset
|
3757 // object that may be less than the region size). |
636 | 3758 |
3759 alloc_region = NULL; | |
3760 } | |
3761 } | |
3762 | |
3763 if (alloc_region == NULL) { | |
3764 // we will get a new GC alloc region | |
2152 | 3765 alloc_region = new_gc_alloc_region(ap, 0); |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3766 } else { |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3767 // the region was retained from the last collection |
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3768 ++_gc_alloc_region_counts[ap]; |
1388 | 3769 if (G1PrintHeapRegions) { |
3770 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
3771 "top "PTR_FORMAT, | |
3772 alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); | |
3773 } | |
342 | 3774 } |
636 | 3775 |
342 | 3776 if (alloc_region != NULL) { |
636 | 3777 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 3778 set_gc_alloc_region(ap, alloc_region); |
3779 } | |
636 | 3780 |
3781 assert(_gc_alloc_regions[ap] == NULL || | |
3782 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
3783 "the GC alloc region should be tagged as such"); | |
3784 assert(_gc_alloc_regions[ap] == NULL || | |
3785 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
3786 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 3787 } |
3788 // Set alternative regions for allocation purposes that have reached | |
636 | 3789 // their limit. |
342 | 3790 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
3791 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
3792 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
3793 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
3794 } | |
3795 } | |
3796 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3797 } | |
3798 | |
636 | 3799 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 3800 // We keep a separate list of all regions that have been alloc regions in |
636 | 3801 // the current collection pause. Forget that now. This method will |
3802 // untag the GC alloc regions and tear down the GC alloc region | |
3803 // list. It's desirable that no regions are tagged as GC alloc | |
3804 // outside GCs. | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
3805 |
342 | 3806 forget_alloc_region_list(); |
3807 | |
3808 // The current alloc regions contain objs that have survived | |
3809 // collection. Make them no longer GC alloc regions. | |
3810 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3811 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 3812 _retained_gc_alloc_regions[ap] = NULL; |
861
45d97a99715b
6862661: G1: _gc_alloc_region_counts is not updated properly after 6604422
apetrusenko
parents:
846
diff
changeset
|
3813 _gc_alloc_region_counts[ap] = 0; |
636 | 3814 |
3815 if (r != NULL) { | |
3816 // we retain nothing on _gc_alloc_regions between GCs | |
3817 set_gc_alloc_region(ap, NULL); | |
3818 | |
3819 if (r->is_empty()) { | |
2152 | 3820 // We didn't actually allocate anything in it; let's just put |
3821 // it back on the free list. | |
3822 _free_list.add_as_tail(r); | |
636 | 3823 } else if (_retain_gc_alloc_region[ap] && !totally) { |
3824 // retain it so that we can use it at the beginning of the next GC | |
3825 _retained_gc_alloc_regions[ap] = r; | |
342 | 3826 } |
3827 } | |
636 | 3828 } |
3829 } | |
3830 | |
3831 #ifndef PRODUCT | |
3832 // Useful for debugging | |
3833 | |
3834 void G1CollectedHeap::print_gc_alloc_regions() { | |
3835 gclog_or_tty->print_cr("GC alloc regions"); | |
3836 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3837 HeapRegion* r = _gc_alloc_regions[ap]; | |
3838 if (r == NULL) { | |
3839 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3840 } else { | |
3841 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3842 ap, r->bottom(), r->used()); | |
3843 } | |
3844 } | |
3845 } | |
3846 #endif // PRODUCT | |
342 | 3847 |
3848 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3849 _drain_in_progress = false; | |
3850 set_evac_failure_closure(cl); | |
3851 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3852 } | |
3853 | |
3854 void G1CollectedHeap::finalize_for_evac_failure() { | |
3855 assert(_evac_failure_scan_stack != NULL && | |
3856 _evac_failure_scan_stack->length() == 0, | |
3857 "Postcondition"); | |
3858 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3859 delete _evac_failure_scan_stack; |
342 | 3860 _evac_failure_scan_stack = NULL; |
3861 } | |
3862 | |
3863 | |
3864 | |
3865 // *** Sequential G1 Evacuation | |
3866 | |
3867 class G1IsAliveClosure: public BoolObjectClosure { | |
3868 G1CollectedHeap* _g1; | |
3869 public: | |
3870 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3871 void do_object(oop p) { assert(false, "Do not call."); } | |
3872 bool do_object_b(oop p) { | |
3873 // It is reachable if it is outside the collection set, or is inside | |
3874 // and forwarded. | |
3875 | |
3876 #ifdef G1_DEBUG | |
3877 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3878 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3879 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3880 #endif // G1_DEBUG | |
3881 | |
3882 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3883 } | |
3884 }; | |
3885 | |
3886 class G1KeepAliveClosure: public OopClosure { | |
3887 G1CollectedHeap* _g1; | |
3888 public: | |
3889 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3890 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3891 void do_oop( oop* p) { |
342 | 3892 oop obj = *p; |
3893 #ifdef G1_DEBUG | |
3894 if (PrintGC && Verbose) { | |
3895 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3896 p, (void*) obj, (void*) *p); | |
3897 } | |
3898 #endif // G1_DEBUG | |
3899 | |
3900 if (_g1->obj_in_cs(obj)) { | |
3901 assert( obj->is_forwarded(), "invariant" ); | |
3902 *p = obj->forwardee(); | |
3903 #ifdef G1_DEBUG | |
3904 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3905 (void*) obj, (void*) *p); | |
3906 #endif // G1_DEBUG | |
3907 } | |
3908 } | |
3909 }; | |
3910 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3911 class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3912 private: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3913 G1CollectedHeap* _g1; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3914 DirtyCardQueue *_dcq; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3915 CardTableModRefBS* _ct_bs; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3916 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3917 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3918 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3919 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3920 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3921 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3922 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3923 template <class T> void do_oop_work(T* p) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3924 assert(_from->is_in_reserved(p), "paranoia"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3925 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3926 !_from->is_survivor()) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3927 size_t card_index = _ct_bs->index_for(p); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3928 if (_ct_bs->mark_card_deferred(card_index)) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3929 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3930 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3931 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3932 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3933 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3934 |
342 | 3935 class RemoveSelfPointerClosure: public ObjectClosure { |
3936 private: | |
3937 G1CollectedHeap* _g1; | |
3938 ConcurrentMark* _cm; | |
3939 HeapRegion* _hr; | |
3940 size_t _prev_marked_bytes; | |
3941 size_t _next_marked_bytes; | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3942 OopsInHeapRegionClosure *_cl; |
342 | 3943 public: |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3944 RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr, |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3945 OopsInHeapRegionClosure* cl) : |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3946 _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3947 _next_marked_bytes(0), _cl(cl) {} |
342 | 3948 |
3949 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3950 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3951 | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3952 // <original comment> |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3953 // The original idea here was to coalesce evacuated and dead objects. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3954 // However that caused complications with the block offset table (BOT). |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3955 // In particular if there were two TLABs, one of them partially refined. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3956 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3957 // The BOT entries of the unrefined part of TLAB_2 point to the start |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3958 // of TLAB_2. If the last object of the TLAB_1 and the first object |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3959 // of TLAB_2 are coalesced, then the cards of the unrefined part |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3960 // would point into middle of the filler object. |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3961 // The current approach is to not coalesce and leave the BOT contents intact. |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3962 // </original comment> |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3963 // |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3964 // We now reset the BOT when we start the object iteration over the |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3965 // region and refine its entries for every object we come across. So |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3966 // the above comment is not really relevant and we should be able |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3967 // to coalesce dead objects if we want to. |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3968 void do_object(oop obj) { |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3969 HeapWord* obj_addr = (HeapWord*) obj; |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3970 assert(_hr->is_in(obj_addr), "sanity"); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3971 size_t obj_size = obj->size(); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3972 _hr->update_bot_for_object(obj_addr, obj_size); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3973 if (obj->is_forwarded() && obj->forwardee() == obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3974 // The object failed to move. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3975 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3976 _cm->markPrev(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3977 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
3978 _prev_marked_bytes += (obj_size * HeapWordSize); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3979 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3980 _cm->markAndGrayObjectIfNecessary(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3981 } |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3982 obj->set_mark(markOopDesc::prototype()); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3983 // While we were processing RSet buffers during the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3984 // collection, we actually didn't scan any cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3985 // collection set, since we didn't want to update remebered |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3986 // sets with entries that point into the collection set, given |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3987 // that live objects fromthe collection set are about to move |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3988 // and such entries will be stale very soon. This change also |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3989 // dealt with a reliability issue which involved scanning a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3990 // card in the collection set and coming across an array that |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3991 // was being chunked and looking malformed. The problem is |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3992 // that, if evacuation fails, we might have remembered set |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3993 // entries missing given that we skipped cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3994 // collection set. So, we'll recreate such entries now. |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3995 obj->oop_iterate(_cl); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3996 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3997 } else { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3998 // The object has been either evacuated or is dead. Fill it with a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3999 // dummy object. |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4000 MemRegion mr((HeapWord*)obj, obj_size); |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
4001 CollectedHeap::fill_with_object(mr); |
342 | 4002 _cm->clearRangeBothMaps(mr); |
4003 } | |
4004 } | |
4005 }; | |
4006 | |
4007 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
1705 | 4008 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4009 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4010 UpdateRSetDeferred deferred_update(_g1h, &dcq); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4011 OopsInHeapRegionClosure *cl; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4012 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4013 cl = &deferred_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4014 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4015 cl = &immediate_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4016 } |
342 | 4017 HeapRegion* cur = g1_policy()->collection_set(); |
4018 while (cur != NULL) { | |
4019 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4020 assert(!cur->isHumongous(), "sanity"); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4021 |
342 | 4022 if (cur->evacuation_failed()) { |
4023 assert(cur->in_collection_set(), "bad CS"); | |
2133
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4024 RemoveSelfPointerClosure rspc(_g1h, cur, cl); |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4025 |
2250ee17e258
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
2039
diff
changeset
|
4026 cur->reset_bot(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4027 cl->set_region(cur); |
342 | 4028 cur->object_iterate(&rspc); |
4029 | |
4030 // A number of manipulations to make the TAMS be the current top, | |
4031 // and the marked bytes be the ones observed in the iteration. | |
4032 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
4033 // The comments below are the postconditions achieved by the | |
4034 // calls. Note especially the last such condition, which says that | |
4035 // the count of marked bytes has been properly restored. | |
4036 cur->note_start_of_marking(false); | |
4037 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
4038 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
4039 // _next_marked_bytes == prev_marked_bytes. | |
4040 cur->note_end_of_marking(); | |
4041 // _prev_top_at_mark_start == top(), | |
4042 // _prev_marked_bytes == prev_marked_bytes | |
4043 } | |
4044 // If there is no mark in progress, we modified the _next variables | |
4045 // above needlessly, but harmlessly. | |
4046 if (_g1h->mark_in_progress()) { | |
4047 cur->note_start_of_marking(false); | |
4048 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
4049 // _next_marked_bytes == next_marked_bytes. | |
4050 } | |
4051 | |
4052 // Now make sure the region has the right index in the sorted array. | |
4053 g1_policy()->note_change_in_marked_bytes(cur); | |
4054 } | |
4055 cur = cur->next_in_collection_set(); | |
4056 } | |
4057 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
4058 | |
4059 // Now restore saved marks, if any. | |
4060 if (_objs_with_preserved_marks != NULL) { | |
4061 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
4062 guarantee(_objs_with_preserved_marks->length() == | |
4063 _preserved_marks_of_objs->length(), "Both or none."); | |
4064 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
4065 oop obj = _objs_with_preserved_marks->at(i); | |
4066 markOop m = _preserved_marks_of_objs->at(i); | |
4067 obj->set_mark(m); | |
4068 } | |
4069 // Delete the preserved marks growable arrays (allocated on the C heap). | |
4070 delete _objs_with_preserved_marks; | |
4071 delete _preserved_marks_of_objs; | |
4072 _objs_with_preserved_marks = NULL; | |
4073 _preserved_marks_of_objs = NULL; | |
4074 } | |
4075 } | |
4076 | |
4077 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
4078 _evac_failure_scan_stack->push(obj); | |
4079 } | |
4080 | |
4081 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
4082 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
4083 | |
4084 while (_evac_failure_scan_stack->length() > 0) { | |
4085 oop obj = _evac_failure_scan_stack->pop(); | |
4086 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
4087 obj->oop_iterate_backwards(_evac_failure_closure); | |
4088 } | |
4089 } | |
4090 | |
4091 oop | |
4092 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
4093 oop old) { | |
4094 markOop m = old->mark(); | |
4095 oop forward_ptr = old->forward_to_atomic(old); | |
4096 if (forward_ptr == NULL) { | |
4097 // Forward-to-self succeeded. | |
4098 if (_evac_failure_closure != cl) { | |
4099 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
4100 assert(!_drain_in_progress, | |
4101 "Should only be true while someone holds the lock."); | |
4102 // Set the global evac-failure closure to the current thread's. | |
4103 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
4104 set_evac_failure_closure(cl); | |
4105 // Now do the common part. | |
4106 handle_evacuation_failure_common(old, m); | |
4107 // Reset to NULL. | |
4108 set_evac_failure_closure(NULL); | |
4109 } else { | |
4110 // The lock is already held, and this is recursive. | |
4111 assert(_drain_in_progress, "This should only be the recursive case."); | |
4112 handle_evacuation_failure_common(old, m); | |
4113 } | |
4114 return old; | |
4115 } else { | |
4116 // Someone else had a place to copy it. | |
4117 return forward_ptr; | |
4118 } | |
4119 } | |
4120 | |
4121 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
4122 set_evacuation_failed(true); | |
4123 | |
4124 preserve_mark_if_necessary(old, m); | |
4125 | |
4126 HeapRegion* r = heap_region_containing(old); | |
4127 if (!r->evacuation_failed()) { | |
4128 r->set_evacuation_failed(true); | |
1282 | 4129 if (G1PrintHeapRegions) { |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
4130 gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " |
342 | 4131 "["PTR_FORMAT","PTR_FORMAT")\n", |
4132 r, r->bottom(), r->end()); | |
4133 } | |
4134 } | |
4135 | |
4136 push_on_evac_failure_scan_stack(old); | |
4137 | |
4138 if (!_drain_in_progress) { | |
4139 // prevent recursion in copy_to_survivor_space() | |
4140 _drain_in_progress = true; | |
4141 drain_evac_failure_scan_stack(); | |
4142 _drain_in_progress = false; | |
4143 } | |
4144 } | |
4145 | |
4146 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
2038
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4147 assert(evacuation_failed(), "Oversaving!"); |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4148 // We want to call the "for_promotion_failure" version only in the |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4149 // case of a promotion failure. |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
2037
diff
changeset
|
4150 if (m->must_be_preserved_for_promotion_failure(obj)) { |
342 | 4151 if (_objs_with_preserved_marks == NULL) { |
4152 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
4153 _objs_with_preserved_marks = | |
4154 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
4155 _preserved_marks_of_objs = | |
4156 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
4157 } | |
4158 _objs_with_preserved_marks->push(obj); | |
4159 _preserved_marks_of_objs->push(m); | |
4160 } | |
4161 } | |
4162 | |
4163 // *** Parallel G1 Evacuation | |
4164 | |
4165 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
4166 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4167 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4168 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4169 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4170 |
342 | 4171 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
4172 // let the caller handle alloc failure | |
4173 if (alloc_region == NULL) return NULL; | |
4174 | |
4175 HeapWord* block = alloc_region->par_allocate(word_size); | |
4176 if (block == NULL) { | |
4177 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
4178 } | |
4179 return block; | |
4180 } | |
4181 | |
545 | 4182 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
4183 bool par) { | |
4184 // Another thread might have obtained alloc_region for the given | |
4185 // purpose, and might be attempting to allocate in it, and might | |
4186 // succeed. Therefore, we can't do the "finalization" stuff on the | |
4187 // region below until we're sure the last allocation has happened. | |
4188 // We ensure this by allocating the remaining space with a garbage | |
4189 // object. | |
4190 if (par) par_allocate_remaining_space(alloc_region); | |
4191 // Now we can do the post-GC stuff on the region. | |
4192 alloc_region->note_end_of_copying(); | |
4193 g1_policy()->record_after_bytes(alloc_region->used()); | |
4194 } | |
4195 | |
342 | 4196 HeapWord* |
4197 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
4198 HeapRegion* alloc_region, | |
4199 bool par, | |
4200 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4201 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4202 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4203 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4204 |
2152 | 4205 // We need to make sure we serialize calls to this method. Given |
4206 // that the FreeList_lock guards accesses to the free_list anyway, | |
4207 // and we need to potentially remove a region from it, we'll use it | |
4208 // to protect the whole call. | |
4209 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | |
4210 | |
342 | 4211 HeapWord* block = NULL; |
4212 // In the parallel case, a previous thread to obtain the lock may have | |
4213 // already assigned a new gc_alloc_region. | |
4214 if (alloc_region != _gc_alloc_regions[purpose]) { | |
4215 assert(par, "But should only happen in parallel case."); | |
4216 alloc_region = _gc_alloc_regions[purpose]; | |
4217 if (alloc_region == NULL) return NULL; | |
4218 block = alloc_region->par_allocate(word_size); | |
4219 if (block != NULL) return block; | |
4220 // Otherwise, continue; this new region is empty, too. | |
4221 } | |
4222 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 4223 retire_alloc_region(alloc_region, par); |
342 | 4224 |
4225 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
4226 // Cannot allocate more regions for the given purpose. | |
4227 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
4228 // Is there an alternative? | |
4229 if (purpose != alt_purpose) { | |
4230 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
4231 // Has not the alternative region been aliased? | |
545 | 4232 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 4233 // Try to allocate in the alternative region. |
4234 if (par) { | |
4235 block = alt_region->par_allocate(word_size); | |
4236 } else { | |
4237 block = alt_region->allocate(word_size); | |
4238 } | |
4239 // Make an alias. | |
4240 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 4241 if (block != NULL) { |
4242 return block; | |
4243 } | |
4244 retire_alloc_region(alt_region, par); | |
342 | 4245 } |
4246 // Both the allocation region and the alternative one are full | |
4247 // and aliased, replace them with a new allocation region. | |
4248 purpose = alt_purpose; | |
4249 } else { | |
4250 set_gc_alloc_region(purpose, NULL); | |
4251 return NULL; | |
4252 } | |
4253 } | |
4254 | |
4255 // Now allocate a new region for allocation. | |
2152 | 4256 alloc_region = new_gc_alloc_region(purpose, word_size); |
342 | 4257 |
4258 // let the caller handle alloc failure | |
4259 if (alloc_region != NULL) { | |
4260 | |
4261 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
4262 assert(alloc_region->saved_mark_at_top(), | |
4263 "Mark should have been saved already."); | |
4264 // This must be done last: once it's installed, other regions may | |
4265 // allocate in it (without holding the lock.) | |
4266 set_gc_alloc_region(purpose, alloc_region); | |
4267 | |
4268 if (par) { | |
4269 block = alloc_region->par_allocate(word_size); | |
4270 } else { | |
4271 block = alloc_region->allocate(word_size); | |
4272 } | |
4273 // Caller handles alloc failure. | |
4274 } else { | |
4275 // This sets other apis using the same old alloc region to NULL, also. | |
4276 set_gc_alloc_region(purpose, NULL); | |
4277 } | |
4278 return block; // May be NULL. | |
4279 } | |
4280 | |
4281 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
4282 HeapWord* block = NULL; | |
4283 size_t free_words; | |
4284 do { | |
4285 free_words = r->free()/HeapWordSize; | |
4286 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
4287 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 4288 // Otherwise, try to claim it. |
4289 block = r->par_allocate(free_words); | |
4290 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
4291 fill_with_object(block, free_words); |
342 | 4292 } |
4293 | |
4294 #ifndef PRODUCT | |
4295 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
4296 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
4297 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
4298 return true; | |
4299 } | |
4300 #endif // PRODUCT | |
4301 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4302 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4303 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4304 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4305 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4306 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4307 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4308 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4309 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4310 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4311 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4312 _age_table(false), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4313 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4314 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4315 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4316 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4317 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4318 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4319 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4320 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4321 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4322 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4323 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4324 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4325 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4326 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4327 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4328 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4329 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4330 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4331 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4332 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4333 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4334 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4335 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4336 } |
342 | 4337 |
1709 | 4338 void |
4339 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) | |
4340 { | |
4341 st->print_raw_cr("GC Termination Stats"); | |
4342 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" | |
4343 " ------waste (KiB)------"); | |
4344 st->print_raw_cr("thr ms ms % ms % attempts" | |
4345 " total alloc undo"); | |
4346 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" | |
4347 " ------- ------- -------"); | |
4348 } | |
4349 | |
4350 void | |
4351 G1ParScanThreadState::print_termination_stats(int i, | |
4352 outputStream* const st) const | |
4353 { | |
4354 const double elapsed_ms = elapsed_time() * 1000.0; | |
4355 const double s_roots_ms = strong_roots_time() * 1000.0; | |
4356 const double term_ms = term_time() * 1000.0; | |
4357 st->print_cr("%3d %9.2f %9.2f %6.2f " | |
4358 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " | |
4359 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), | |
4360 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, | |
4361 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), | |
4362 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, | |
4363 alloc_buffer_waste() * HeapWordSize / K, | |
4364 undo_waste() * HeapWordSize / K); | |
4365 } | |
4366 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
#ifdef ASSERT
// Debug-only sanity checks for task-queue entries. Each verify_* routine
// asserts and then returns true so it can be used inside assert(...) itself.

// A narrow (compressed) oop location: must be non-NULL, never carry the
// partial-array mask, and must decode to an oop inside the G1 reserved space.
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
  oop p = oopDesc::load_decode_heap_oop(ref);
  assert(_g1h->is_in_g1_reserved(p),
         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  return true;
}

// A full-width oop location: may legitimately carry the partial-array mask
// (an objArray chunking marker), in which case the unmasked oop must be in
// the collection set; otherwise the referent must be in the G1 heap.
bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->obj_in_cs(p),
           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  } else {
    oop p = oopDesc::load_decode_heap_oop(ref);
    assert(_g1h->is_in_g1_reserved(p),
           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  }
  return true;
}

// Dispatch a StarTask to the appropriate typed check above.
bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4401 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4402 void G1ParScanThreadState::trim_queue() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4403 StarTask ref; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4404 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4405 // Drain the overflow stack first, so other threads can steal. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4406 while (refs()->pop_overflow(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4407 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4408 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4409 while (refs()->pop_local(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4410 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4411 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4412 } while (!refs()->is_empty()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4413 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4414 |
342 | 4415 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
4416 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
4417 _par_scan_state(par_scan_state) { } | |
4418 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4419 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 4420 // This is called _after_ do_oop_work has been called, hence after |
4421 // the object has been relocated to its new location and *p points | |
4422 // to its new location. | |
4423 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4424 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4425 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4426 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4427 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), |
342 | 4428 "shouldn't still be in the CSet if evacuation didn't fail."); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4429 HeapWord* addr = (HeapWord*)obj; |
342 | 4430 if (_g1->is_in_g1_reserved(addr)) |
4431 _cm->grayRoot(oop(addr)); | |
4432 } | |
4433 } | |
4434 | |
4435 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
4436 size_t word_sz = old->size(); | |
4437 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
4438 // +1 to make the -1 indexes valid... | |
4439 int young_index = from_region->young_index_in_cset()+1; | |
4440 assert( (from_region->is_young() && young_index > 0) || | |
4441 (!from_region->is_young() && young_index == 0), "invariant" ); | |
4442 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
4443 markOop m = old->mark(); | |
545 | 4444 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
4445 : m->age(); | |
4446 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 4447 word_sz); |
4448 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
4449 oop obj = oop(obj_ptr); | |
4450 | |
4451 if (obj_ptr == NULL) { | |
4452 // This will either forward-to-self, or detect that someone else has | |
4453 // installed a forwarding pointer. | |
4454 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4455 return _g1->handle_evacuation_failure_par(cl, old); | |
4456 } | |
4457 | |
526 | 4458 // We're going to allocate linearly, so might as well prefetch ahead. |
4459 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
4460 | |
342 | 4461 oop forward_ptr = old->forward_to_atomic(obj); |
4462 if (forward_ptr == NULL) { | |
4463 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 4464 if (g1p->track_object_age(alloc_purpose)) { |
4465 // We could simply do obj->incr_age(). However, this causes a | |
4466 // performance issue. obj->incr_age() will first check whether | |
4467 // the object has a displaced mark by checking its mark word; | |
4468 // getting the mark word from the new location of the object | |
4469 // stalls. So, given that we already have the mark word and we | |
4470 // are about to install it anyway, it's better to increase the | |
4471 // age on the mark word, when the object does not have a | |
4472 // displaced mark word. We're not expecting many objects to have | |
4473 // a displaced marked word, so that case is not optimized | |
4474 // further (it could be...) and we simply call obj->incr_age(). | |
4475 | |
4476 if (m->has_displaced_mark_helper()) { | |
4477 // in this case, we have to install the mark word first, | |
4478 // otherwise obj looks to be forwarded (the old mark word, | |
4479 // which contains the forward pointer, was copied) | |
4480 obj->set_mark(m); | |
4481 obj->incr_age(); | |
4482 } else { | |
4483 m = m->incr_age(); | |
545 | 4484 obj->set_mark(m); |
526 | 4485 } |
545 | 4486 _par_scan_state->age_table()->add(obj, word_sz); |
4487 } else { | |
4488 obj->set_mark(m); | |
526 | 4489 } |
4490 | |
342 | 4491 // preserve "next" mark bit |
4492 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
4493 if (!use_local_bitmaps || | |
4494 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
4495 // if we couldn't mark it on the local bitmap (this happens when | |
4496 // the object was not allocated in the GCLab), we have to bite | |
4497 // the bullet and do the standard parallel mark | |
4498 _cm->markAndGrayObjectIfNecessary(obj); | |
4499 } | |
4500 #if 1 | |
4501 if (_g1->isMarkedNext(old)) { | |
4502 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
4503 } | |
4504 #endif | |
4505 } | |
4506 | |
4507 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
4508 surv_young_words[young_index] += word_sz; | |
4509 | |
4510 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
4511 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4512 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4513 _par_scan_state->push_on_queue(old_p); |
342 | 4514 } else { |
526 | 4515 // No point in using the slower heap_region_containing() method, |
4516 // given that we know obj is in the heap. | |
4517 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 4518 obj->oop_iterate_backwards(_scanner); |
4519 } | |
4520 } else { | |
4521 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
4522 obj = forward_ptr; | |
4523 } | |
4524 return obj; | |
4525 } | |
4526 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4527 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4528 template <class T> |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4529 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4530 ::do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4531 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 4532 assert(barrier != G1BarrierRS || obj != NULL, |
4533 "Precondition: G1BarrierRS implies obj is nonNull"); | |
4534 | |
526 | 4535 // here the null check is implicit in the cset_fast_test() test |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4536 if (_g1->in_cset_fast_test(obj)) { |
342 | 4537 #if G1_REM_SET_LOGGING |
526 | 4538 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
4539 "into CS.", p, (void*) obj); | |
342 | 4540 #endif |
526 | 4541 if (obj->is_forwarded()) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4542 oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
526 | 4543 } else { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4544 oop copy_oop = copy_to_survivor_space(obj); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4545 oopDesc::encode_store_heap_oop(p, copy_oop); |
342 | 4546 } |
526 | 4547 // When scanning the RS, we only care about objs in CS. |
4548 if (barrier == G1BarrierRS) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4549 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 4550 } |
526 | 4551 } |
4552 | |
4553 if (barrier == G1BarrierEvac && obj != NULL) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4554 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 4555 } |
4556 | |
4557 if (do_gen_barrier && obj != NULL) { | |
4558 par_do_barrier(p); | |
4559 } | |
4560 } | |
4561 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4562 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4563 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4564 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4565 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
526 | 4566 assert(has_partial_array_mask(p), "invariant"); |
4567 oop old = clear_partial_array_mask(p); | |
342 | 4568 assert(old->is_objArray(), "must be obj array"); |
4569 assert(old->is_forwarded(), "must be forwarded"); | |
4570 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
4571 | |
4572 objArrayOop obj = objArrayOop(old->forwardee()); | |
4573 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
4574 // Process ParGCArrayScanChunk elements now | |
4575 // and push the remainder back onto queue | |
4576 int start = arrayOop(old)->length(); | |
4577 int end = obj->length(); | |
4578 int remainder = end - start; | |
4579 assert(start <= end, "just checking"); | |
4580 if (remainder > 2 * ParGCArrayScanChunk) { | |
4581 // Test above combines last partial chunk with a full chunk | |
4582 end = start + ParGCArrayScanChunk; | |
4583 arrayOop(old)->set_length(end); | |
4584 // Push remainder. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4585 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4586 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4587 _par_scan_state->push_on_queue(old_p); |
342 | 4588 } else { |
4589 // Restore length so that the heap remains parsable in | |
4590 // case of evacuation failure. | |
4591 arrayOop(old)->set_length(end); | |
4592 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4593 _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
342 | 4594 // process our set of indices (include header in first chunk) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4595 obj->oop_iterate_range(&_scanner, start, end); |
342 | 4596 } |
4597 | |
4598 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
4599 protected: | |
4600 G1CollectedHeap* _g1h; | |
4601 G1ParScanThreadState* _par_scan_state; | |
4602 RefToScanQueueSet* _queues; | |
4603 ParallelTaskTerminator* _terminator; | |
4604 | |
4605 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
4606 RefToScanQueueSet* queues() { return _queues; } | |
4607 ParallelTaskTerminator* terminator() { return _terminator; } | |
4608 | |
4609 public: | |
4610 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
4611 G1ParScanThreadState* par_scan_state, | |
4612 RefToScanQueueSet* queues, | |
4613 ParallelTaskTerminator* terminator) | |
4614 : _g1h(g1h), _par_scan_state(par_scan_state), | |
4615 _queues(queues), _terminator(terminator) {} | |
4616 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4617 void do_void(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4618 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4619 private: |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4620 inline bool offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4621 }; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4622 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4623 bool G1ParEvacuateFollowersClosure::offer_termination() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4624 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4625 pss->start_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4626 const bool res = terminator()->offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4627 pss->end_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4628 return res; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4629 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4630 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4631 void G1ParEvacuateFollowersClosure::do_void() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4632 StarTask stolen_task; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4633 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4634 pss->trim_queue(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4635 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4636 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4637 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4638 assert(pss->verify_task(stolen_task), "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4639 if (stolen_task.is_narrow()) { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4640 pss->deal_with_reference((narrowOop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4641 } else { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4642 pss->deal_with_reference((oop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4643 } |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4644 |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4645 // We've just processed a reference and we might have made |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4646 // available new entries on the queues. So we have to make sure |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4647 // we drain the queues as necessary. |
342 | 4648 pss->trim_queue(); |
4649 } | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4650 } while (!offer_termination()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4651 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4652 pss->retire_alloc_buffers(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4653 } |
342 | 4654 |
4655 class G1ParTask : public AbstractGangTask { | |
4656 protected: | |
4657 G1CollectedHeap* _g1h; | |
4658 RefToScanQueueSet *_queues; | |
4659 ParallelTaskTerminator _terminator; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4660 int _n_workers; |
342 | 4661 |
4662 Mutex _stats_lock; | |
4663 Mutex* stats_lock() { return &_stats_lock; } | |
4664 | |
4665 size_t getNCards() { | |
4666 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
4667 / G1BlockOffsetSharedArray::N_bytes; | |
4668 } | |
4669 | |
4670 public: | |
4671 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
4672 : AbstractGangTask("G1 collection"), | |
4673 _g1h(g1h), | |
4674 _queues(task_queues), | |
4675 _terminator(workers, _queues), | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4676 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4677 _n_workers(workers) |
342 | 4678 {} |
4679 | |
4680 RefToScanQueueSet* queues() { return _queues; } | |
4681 | |
4682 RefToScanQueue *work_queue(int i) { | |
4683 return queues()->queue(i); | |
4684 } | |
4685 | |
4686 void work(int i) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4687 if (i >= _n_workers) return; // no work needed this round |
1611 | 4688 |
4689 double start_time_ms = os::elapsedTime() * 1000.0; | |
4690 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); | |
4691 | |
342 | 4692 ResourceMark rm; |
4693 HandleMark hm; | |
4694 | |
526 | 4695 G1ParScanThreadState pss(_g1h, i); |
4696 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
4697 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
4698 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 4699 |
4700 pss.set_evac_closure(&scan_evac_cl); | |
4701 pss.set_evac_failure_closure(&evac_failure_cl); | |
4702 pss.set_partial_scan_closure(&partial_scan_cl); | |
4703 | |
4704 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
4705 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
4706 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4707 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4708 |
342 | 4709 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
4710 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
4711 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
4712 | |
4713 OopsInHeapRegionClosure *scan_root_cl; | |
4714 OopsInHeapRegionClosure *scan_perm_cl; | |
4715 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
4716 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
342 | 4717 scan_root_cl = &scan_mark_root_cl; |
4718 scan_perm_cl = &scan_mark_perm_cl; | |
4719 } else { | |
4720 scan_root_cl = &only_scan_root_cl; | |
4721 scan_perm_cl = &only_scan_perm_cl; | |
4722 } | |
4723 | |
4724 pss.start_strong_roots(); | |
4725 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4726 SharedHeap::SO_AllClasses, | |
4727 scan_root_cl, | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4728 &push_heap_rs_cl, |
342 | 4729 scan_perm_cl, |
4730 i); | |
4731 pss.end_strong_roots(); | |
4732 { | |
4733 double start = os::elapsedTime(); | |
4734 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4735 evac.do_void(); | |
4736 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4737 double term_ms = pss.term_time()*1000.0; | |
4738 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
1611 | 4739 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); |
342 | 4740 } |
1282 | 4741 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
342 | 4742 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
4743 | |
4744 // Clean up any par-expanded rem sets. | |
4745 HeapRegionRemSet::par_cleanup(); | |
4746 | |
4747 if (ParallelGCVerbose) { | |
1709 | 4748 MutexLocker x(stats_lock()); |
4749 pss.print_termination_stats(i); | |
342 | 4750 } |
4751 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4752 assert(pss.refs()->is_empty(), "should be empty"); |
1611 | 4753 double end_time_ms = os::elapsedTime() * 1000.0; |
4754 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); | |
342 | 4755 } |
4756 }; | |
4757 | |
4758 // *** Common G1 Evacuation Stuff | |
4759 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4760 // This method is run in a GC worker. |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4761 |
342 | 4762 void |
4763 G1CollectedHeap:: | |
4764 g1_process_strong_roots(bool collecting_perm_gen, | |
4765 SharedHeap::ScanningOption so, | |
4766 OopClosure* scan_non_heap_roots, | |
4767 OopsInHeapRegionClosure* scan_rs, | |
4768 OopsInGenClosure* scan_perm, | |
4769 int worker_i) { | |
4770 // First scan the strong roots, including the perm gen. | |
4771 double ext_roots_start = os::elapsedTime(); | |
4772 double closure_app_time_sec = 0.0; | |
4773 | |
4774 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4775 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4776 buf_scan_perm.set_generation(perm_gen()); | |
4777 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4778 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4779 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4780 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4781 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4782 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4783 collecting_perm_gen, so, |
342 | 4784 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4785 &eager_scan_code_roots, |
342 | 4786 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4787 |
342 | 4788 // Finish up any enqueued closure apps. |
4789 buf_scan_non_heap_roots.done(); | |
4790 buf_scan_perm.done(); | |
4791 double ext_roots_end = os::elapsedTime(); | |
4792 g1_policy()->reset_obj_copy_time(worker_i); | |
4793 double obj_copy_time_sec = | |
4794 buf_scan_non_heap_roots.closure_app_seconds() + | |
4795 buf_scan_perm.closure_app_seconds(); | |
4796 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4797 double ext_root_time_ms = | |
4798 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4799 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4800 | |
4801 // Scan strong roots in mark stack. | |
4802 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4803 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4804 } | |
4805 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4806 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4807 | |
4808 // XXX What should this be doing in the parallel case? | |
4809 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4810 // Now scan the complement of the collection set. | |
4811 if (scan_rs != NULL) { | |
4812 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4813 } | |
4814 // Finish with the ref_processor roots. | |
4815 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
1974
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4816 // We need to treat the discovered reference lists as roots and |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4817 // keep entries (which are added by the marking threads) on them |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4818 // live until they can be processed at the end of marking. |
fd1d227ef1b9
6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent
johnc
parents:
1973
diff
changeset
|
4819 ref_processor()->weak_oops_do(scan_non_heap_roots); |
342 | 4820 ref_processor()->oops_do(scan_non_heap_roots); |
4821 } | |
4822 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4823 _process_strong_tasks->all_tasks_completed(); | |
4824 } | |
4825 | |
4826 void | |
4827 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4828 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4829 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4830 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4831 } |
4832 | |
4833 | |
4834 class SaveMarksClosure: public HeapRegionClosure { | |
4835 public: | |
4836 bool doHeapRegion(HeapRegion* r) { | |
4837 r->save_marks(); | |
4838 return false; | |
4839 } | |
4840 }; | |
4841 | |
4842 void G1CollectedHeap::save_marks() { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4843 if (!CollectedHeap::use_parallel_gc_threads()) { |
342 | 4844 SaveMarksClosure sm; |
4845 heap_region_iterate(&sm); | |
4846 } | |
4847 // We do this even in the parallel case | |
4848 perm_gen()->save_marks(); | |
4849 } | |
4850 | |
// Evacuates (copies) the live objects out of the collection set regions.
// This is the core of an evacuation pause: it runs the parallel (or serial)
// G1ParTask to scan roots and copy objects, processes weak JNI roots,
// handles evacuation failure, and restores the refinement/remembered-set
// machinery afterwards. The statement order here is significant — see the
// inline comments.
void G1CollectedHeap::evacuate_collection_set() {
  set_evacuation_failed(false);

  g1_rem_set()->prepare_for_oops_into_collection_set_do();
  // Disable the refinement hot-card cache for the duration of the pause;
  // it is re-enabled (and cleared) near the end of this method.
  concurrent_g1_refine()->set_use_cache(false);
  concurrent_g1_refine()->clear_hot_cache_claimed_index();

  int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
  set_par_threads(n_workers);
  G1ParTask g1_par_task(this, n_workers, _task_queues);

  init_for_evac_failure(NULL);

  rem_set()->prepare_for_younger_refs_iterate(true);

  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  double start_par = os::elapsedTime();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    // The individual threads will set their evac-failure closures.
    StrongRootsScope srs(this);
    if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
    workers()->run_task(&g1_par_task);
  } else {
    // Serial fallback: execute the same task body directly as worker 0.
    StrongRootsScope srs(this);
    g1_par_task.work(0);
  }

  double par_time = (os::elapsedTime() - start_par) * 1000.0;
  g1_policy()->record_par_time(par_time);
  set_par_threads(0);
  // Is this the right thing to do here? We don't save marks
  // on individual heap regions when we allocate from
  // them in parallel, so this seems like the correct place for this.
  retire_all_alloc_regions();

  // Weak root processing.
  // Note: when JSR 292 is enabled and code blobs can contain
  // non-perm oops then we will need to process the code blobs
  // here too.
  {
    G1IsAliveClosure is_alive(this);
    G1KeepAliveClosure keep_alive(this);
    JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  }
  release_gc_alloc_regions(false /* totally */);
  g1_rem_set()->cleanup_after_oops_into_collection_set_do();

  // Re-enable the hot-card cache now that the pause's scanning is done.
  concurrent_g1_refine()->clear_hot_cache();
  concurrent_g1_refine()->set_use_cache(true);

  finalize_for_evac_failure();

  // Must do this before removing self-forwarding pointers, which clears
  // the per-region evac-failure flags.
  concurrent_mark()->complete_marking_in_collection_set();

  if (evacuation_failed()) {
    remove_self_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (to-space overflow)");
    } else if (PrintGC) {
      gclog_or_tty->print("--");
    }
  }

  if (G1DeferredRSUpdate) {
    // Re-dirty the card table entries for all cards logged during the
    // pause, then hand the buffers back to the mutator-side queue set.
    RedirtyLoggedCardTableEntryFastClosure redirty;
    dirty_card_queue_set().set_closure(&redirty);
    dirty_card_queue_set().apply_closure_to_all_completed_buffers();

    DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
    dcq.merge_bufferlists(&dirty_card_queue_set());
    assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
4927 | |
2152 | 4928 void G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr, |
4929 size_t* pre_used, | |
4930 FreeRegionList* free_list, | |
4931 HumongousRegionSet* humongous_proxy_set, | |
4932 bool par) { | |
4933 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { | |
4934 if (hr->isHumongous()) { | |
4935 assert(hr->startsHumongous(), "we should only see starts humongous"); | |
4936 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par); | |
4937 } else { | |
4938 free_region(hr, pre_used, free_list, par); | |
342 | 4939 } |
4940 } | |
4941 } | |
4942 | |
2152 | 4943 void G1CollectedHeap::free_region(HeapRegion* hr, |
4944 size_t* pre_used, | |
4945 FreeRegionList* free_list, | |
4946 bool par) { | |
4947 assert(!hr->isHumongous(), "this is only for non-humongous regions"); | |
4948 assert(!hr->is_empty(), "the region should not be empty"); | |
4949 assert(free_list != NULL, "pre-condition"); | |
4950 | |
4951 *pre_used += hr->used(); | |
4952 hr->hr_clear(par, true /* clear_space */); | |
4953 free_list->add_as_tail(hr); | |
4954 } | |
4955 | |
4956 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, | |
4957 size_t* pre_used, | |
4958 FreeRegionList* free_list, | |
4959 HumongousRegionSet* humongous_proxy_set, | |
4960 bool par) { | |
4961 assert(hr->startsHumongous(), "this is only for starts humongous regions"); | |
4962 assert(free_list != NULL, "pre-condition"); | |
4963 assert(humongous_proxy_set != NULL, "pre-condition"); | |
4964 | |
4965 size_t hr_used = hr->used(); | |
4966 size_t hr_capacity = hr->capacity(); | |
4967 size_t hr_pre_used = 0; | |
4968 _humongous_set.remove_with_proxy(hr, humongous_proxy_set); | |
4969 hr->set_notHumongous(); | |
4970 free_region(hr, &hr_pre_used, free_list, par); | |
4971 | |
4972 int i = hr->hrs_index() + 1; | |
4973 size_t num = 1; | |
4974 while ((size_t) i < n_regions()) { | |
4975 HeapRegion* curr_hr = _hrs->at(i); | |
4976 if (!curr_hr->continuesHumongous()) { | |
4977 break; | |
4978 } | |
4979 curr_hr->set_notHumongous(); | |
4980 free_region(curr_hr, &hr_pre_used, free_list, par); | |
4981 num += 1; | |
4982 i += 1; | |
4983 } | |
4984 assert(hr_pre_used == hr_used, | |
4985 err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" " | |
4986 "should be the same", hr_pre_used, hr_used)); | |
4987 *pre_used += hr_pre_used; | |
4988 } | |
4989 | |
4990 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used, | |
4991 FreeRegionList* free_list, | |
4992 HumongousRegionSet* humongous_proxy_set, | |
4993 bool par) { | |
4994 if (pre_used > 0) { | |
4995 Mutex* lock = (par) ? ParGCRareEvent_lock : NULL; | |
342 | 4996 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); |
2152 | 4997 assert(_summary_bytes_used >= pre_used, |
4998 err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" " | |
4999 "should be >= pre_used: "SIZE_FORMAT, | |
5000 _summary_bytes_used, pre_used)); | |
342 | 5001 _summary_bytes_used -= pre_used; |
2152 | 5002 } |
5003 if (free_list != NULL && !free_list->is_empty()) { | |
5004 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | |
5005 _free_list.add_as_tail(free_list); | |
5006 } | |
5007 if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) { | |
5008 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); | |
5009 _humongous_set.update_from_proxy(humongous_proxy_set); | |
342 | 5010 } |
5011 } | |
5012 | |
5013 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
5014 while (list != NULL) { | |
5015 guarantee( list->is_young(), "invariant" ); | |
5016 | |
5017 HeapWord* bottom = list->bottom(); | |
5018 HeapWord* end = list->end(); | |
5019 MemRegion mr(bottom, end); | |
5020 ct_bs->dirty(mr); | |
5021 | |
5022 list = list->get_next_young_region(); | |
5023 } | |
5024 } | |
5025 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5026 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5027 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5028 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5029 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5030 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5031 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5032 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5033 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5034 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5035 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5036 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5037 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5038 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5039 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5040 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5041 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5042 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5043 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5044 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5045 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5046 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5047 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5048 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5049 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5050 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5051 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5052 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5053 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5054 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5055 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5056 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5057 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5058 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5059 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5060 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5061 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5062 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5063 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5064 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5065 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5066 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5067 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5068 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5069 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5070 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5071 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5072 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5073 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
5074 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5075 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5076 class G1VerifyCardTableCleanup: public HeapRegionClosure { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5077 CardTableModRefBS* _ct_bs; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5078 public: |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5079 G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5080 : _ct_bs(ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5081 { } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5082 virtual bool doHeapRegion(HeapRegion* r) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5083 { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5084 MemRegion mr(r->bottom(), r->end()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5085 if (r->is_survivor()) { |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5086 _ct_bs->verify_dirty_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5087 } else { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5088 _ct_bs->verify_clean_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5089 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5090 return false; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5091 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5092 }; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5093 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
5094 |
// Clears the card table entries for the regions on the dirty-cards region
// list (in parallel when worker threads are available), then re-dirties the
// survivor regions' cards. Records the elapsed time with the policy, and in
// debug builds optionally verifies the resulting card-table state.
void G1CollectedHeap::cleanUpCardTable() {
  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  double start = os::elapsedTime();

  // Iterate over the dirty cards region list.
  G1ParCleanupCTTask cleanup_task(ct_bs, this,
                                  _young_list->first_survivor_region());

  if (ParallelGCThreads > 0) {
    // Parallel path: workers drain the list and re-dirty survivors
    // cooperatively inside the task.
    set_par_threads(workers()->total_workers());
    workers()->run_task(&cleanup_task);
    set_par_threads(0);
  } else {
    // Serial path: walk the singly-linked dirty-cards region list directly.
    while (_dirty_cards_region_list) {
      HeapRegion* r = _dirty_cards_region_list;
      cleanup_task.clear_cards(r);
      _dirty_cards_region_list = r->get_next_dirty_cards_region();
      if (_dirty_cards_region_list == r) {
        // The last region.
        // NOTE(review): the list appears to be terminated by a self-link
        // rather than NULL — confirm against push/pop in this file.
        _dirty_cards_region_list = NULL;
      }
      r->set_next_dirty_cards_region(NULL);
    }
    // now, redirty the cards of the survivor regions
    // (it seemed faster to do it this way, instead of iterating over
    // all regions and then clearing / dirtying as appropriate)
    dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
  }

  double elapsed = os::elapsedTime() - start;
  g1_policy()->record_clear_ct_time( elapsed * 1000.0);
#ifndef PRODUCT
  // Debug builds: optionally verify survivors are dirty and the rest clean.
  if (G1VerifyCTCleanup || VerifyAfterGC) {
    G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
    heap_region_iterate(&cleanup_verifier);
  }
#endif
}
5133 | |
5134 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
2152 | 5135 size_t pre_used = 0; |
5136 FreeRegionList local_free_list("Local List for CSet Freeing"); | |
5137 | |
342 | 5138 double young_time_ms = 0.0; |
5139 double non_young_time_ms = 0.0; | |
5140 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5141 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5142 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5143 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5144 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5145 |
342 | 5146 G1CollectorPolicy* policy = g1_policy(); |
5147 | |
5148 double start_sec = os::elapsedTime(); | |
5149 bool non_young = true; | |
5150 | |
5151 HeapRegion* cur = cs_head; | |
5152 int age_bound = -1; | |
5153 size_t rs_lengths = 0; | |
5154 | |
5155 while (cur != NULL) { | |
2152 | 5156 assert(!is_on_free_list(cur), "sanity"); |
5157 | |
342 | 5158 if (non_young) { |
5159 if (cur->is_young()) { | |
5160 double end_sec = os::elapsedTime(); | |
5161 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5162 non_young_time_ms += elapsed_ms; | |
5163 | |
5164 start_sec = os::elapsedTime(); | |
5165 non_young = false; | |
5166 } | |
5167 } else { | |
2152 | 5168 double end_sec = os::elapsedTime(); |
5169 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5170 young_time_ms += elapsed_ms; | |
5171 | |
5172 start_sec = os::elapsedTime(); | |
5173 non_young = true; | |
342 | 5174 } |
5175 | |
5176 rs_lengths += cur->rem_set()->occupied(); | |
5177 | |
5178 HeapRegion* next = cur->next_in_collection_set(); | |
5179 assert(cur->in_collection_set(), "bad CS"); | |
5180 cur->set_next_in_collection_set(NULL); | |
5181 cur->set_in_collection_set(false); | |
5182 | |
5183 if (cur->is_young()) { | |
5184 int index = cur->young_index_in_cset(); | |
5185 guarantee( index != -1, "invariant" ); | |
5186 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
5187 size_t words_survived = _surviving_young_words[index]; | |
5188 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5189 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5190 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5191 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5192 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5193 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5194 cur->set_next_young_region(NULL); |
342 | 5195 } else { |
5196 int index = cur->young_index_in_cset(); | |
5197 guarantee( index == -1, "invariant" ); | |
5198 } | |
5199 | |
5200 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
5201 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
5202 "invariant" ); | |
5203 | |
5204 if (!cur->evacuation_failed()) { | |
5205 // And the region is empty. | |
2152 | 5206 assert(!cur->is_empty(), "Should not have empty regions in a CS."); |
5207 free_region(cur, &pre_used, &local_free_list, false /* par */); | |
342 | 5208 } else { |
5209 cur->uninstall_surv_rate_group(); | |
5210 if (cur->is_young()) | |
5211 cur->set_young_index_in_cset(-1); | |
5212 cur->set_not_young(); | |
5213 cur->set_evacuation_failed(false); | |
5214 } | |
5215 cur = next; | |
5216 } | |
5217 | |
5218 policy->record_max_rs_lengths(rs_lengths); | |
5219 policy->cset_regions_freed(); | |
5220 | |
5221 double end_sec = os::elapsedTime(); | |
5222 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5223 if (non_young) | |
5224 non_young_time_ms += elapsed_ms; | |
5225 else | |
5226 young_time_ms += elapsed_ms; | |
5227 | |
2152 | 5228 update_sets_after_freeing_regions(pre_used, &local_free_list, |
5229 NULL /* humongous_proxy_set */, | |
5230 false /* par */); | |
342 | 5231 policy->record_young_free_cset_time_ms(young_time_ms); |
5232 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
5233 } | |
5234 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5235 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5236 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5237 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5238 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5239 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5240 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5241 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5242 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5243 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5244 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5245 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5246 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5247 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5248 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5249 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5250 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5251 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5252 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5253 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5254 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5255 |
// Raises the _free_regions_coming flag to announce that a concurrent
// region-freeing operation (after a cleanup pause) is about to start
// handing regions back. Threads that need a stable view wait in
// wait_while_free_regions_coming() until the flag is reset.
// NOTE(review): the flag is set without taking SecondaryFreeList_lock;
// presumably only the single concurrent-mark thread ever calls this
// (the log tag says "[cm thread]") — confirm against callers.
void G1CollectedHeap::set_free_regions_coming() {
  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
                           "setting free regions coming");
  }

  // The flag must not already be set when this is called.
  assert(!free_regions_coming(), "pre-condition");
  _free_regions_coming = true;
}
5265 | |
// Clears the _free_regions_coming flag (under SecondaryFreeList_lock)
// and wakes up every thread blocked in wait_while_free_regions_coming().
void G1CollectedHeap::reset_free_regions_coming() {
  {
    // The flag must be set when this is called.
    assert(free_regions_coming(), "pre-condition");
    // Take the lock so the flag flip and the notify are atomic with
    // respect to waiters testing the flag under the same lock.
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    _free_regions_coming = false;
    SecondaryFreeList_lock->notify_all();
  }

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
                           "reset free regions coming");
  }
}
5279 | |
// Blocks the calling thread until the concurrent region-freeing
// operation announced via set_free_regions_coming() has completed
// (i.e. until reset_free_regions_coming() notifies on
// SecondaryFreeList_lock).
void G1CollectedHeap::wait_while_free_regions_coming() {
  // Most of the time we won't have to wait, so let's do a quick test
  // first before we take the lock.
  if (!free_regions_coming()) {
    return;
  }

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
                           "waiting for free regions");
  }

  {
    // Re-test under the lock and wait; the while loop guards against
    // spurious wakeups.
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    while (free_regions_coming()) {
      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
                           "done waiting for free regions");
  }
}
5304 | |
5305 size_t G1CollectedHeap::n_regions() { | |
5306 return _hrs->length(); | |
5307 } | |
5308 | |
5309 size_t G1CollectedHeap::max_regions() { | |
5310 return | |
5311 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / | |
5312 HeapRegion::GrainBytes; | |
5313 } | |
5314 | |
// Pushes hr onto the young list and tells the policy it is a
// short-lived region. The "locked" suffix reflects the precondition:
// the heap lock must already be held by (or for) this thread.
void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  _young_list->push_region(hr);
  g1_policy()->set_region_short_lived(hr);
}
5321 | |
5322 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5323 private: | |
5324 bool _success; | |
5325 public: | |
5326 NoYoungRegionsClosure() : _success(true) { } | |
5327 bool doHeapRegion(HeapRegion* r) { | |
5328 if (r->is_young()) { | |
5329 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5330 r->bottom(), r->end()); | |
5331 _success = false; | |
5332 } | |
5333 return false; | |
5334 } | |
5335 bool success() { return _success; } | |
5336 }; | |
5337 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5338 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5339 bool ret = _young_list->check_list_empty(check_sample); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5340 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5341 if (check_heap) { |
342 | 5342 NoYoungRegionsClosure closure; |
5343 heap_region_iterate(&closure); | |
5344 ret = ret && closure.success(); | |
5345 } | |
5346 | |
5347 return ret; | |
5348 } | |
5349 | |
// Drops all regions from the young list. Requires the heap lock to be
// held by (or for) this thread and the policy to be in young GC mode.
void G1CollectedHeap::empty_young_list() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");

  _young_list->empty_list();
}
5357 | |
5358 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5359 bool no_allocs = true; | |
5360 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5361 HeapRegion* r = _gc_alloc_regions[ap]; | |
5362 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5363 } | |
5364 return no_allocs; | |
5365 } | |
5366 | |
545 | 5367 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5368 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5369 HeapRegion* r = _gc_alloc_regions[ap]; | |
5370 if (r != NULL) { | |
5371 // Check for aliases. | |
5372 bool has_processed_alias = false; | |
5373 for (int i = 0; i < ap; ++i) { | |
5374 if (_gc_alloc_regions[i] == r) { | |
5375 has_processed_alias = true; | |
5376 break; | |
5377 } | |
5378 } | |
5379 if (!has_processed_alias) { | |
545 | 5380 retire_alloc_region(r, false /* par */); |
342 | 5381 } |
5382 } | |
5383 } | |
5384 } | |
5385 | |
// Done at the start of full GC.
// Empties the master free list; rebuild_region_lists() repopulates it
// once the full collection has finished.
void G1CollectedHeap::tear_down_region_lists() {
  _free_list.remove_all();
}
5390 | |
5391 class RegionResetter: public HeapRegionClosure { | |
2152 | 5392 G1CollectedHeap* _g1h; |
5393 FreeRegionList _local_free_list; | |
5394 | |
342 | 5395 public: |
2152 | 5396 RegionResetter() : _g1h(G1CollectedHeap::heap()), |
5397 _local_free_list("Local Free List for RegionResetter") { } | |
5398 | |
342 | 5399 bool doHeapRegion(HeapRegion* r) { |
5400 if (r->continuesHumongous()) return false; | |
5401 if (r->top() > r->bottom()) { | |
5402 if (r->top() < r->end()) { | |
5403 Copy::fill_to_words(r->top(), | |
5404 pointer_delta(r->end(), r->top())); | |
5405 } | |
5406 } else { | |
5407 assert(r->is_empty(), "tautology"); | |
2152 | 5408 _local_free_list.add_as_tail(r); |
342 | 5409 } |
5410 return false; | |
5411 } | |
5412 | |
2152 | 5413 void update_free_lists() { |
5414 _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL, | |
5415 false /* par */); | |
5416 } | |
342 | 5417 }; |
5418 | |
5419 // Done at the end of full GC. | |
5420 void G1CollectedHeap::rebuild_region_lists() { | |
5421 // This needs to go at the end of the full GC. | |
5422 RegionResetter rs; | |
5423 heap_region_iterate(&rs); | |
2152 | 5424 rs.update_free_lists(); |
342 | 5425 } |
5426 | |
// Forwards the concurrent/non-concurrent mode flag to the refinement
// card-table-entry closure.
void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  _refine_cte_cl->set_concurrent(concurrent);
}
5430 | |
#ifdef ASSERT

// Debug-only membership test: p is in the closed subset if its
// containing region reports it as "in", falling back to the permanent
// generation when no region contains p.
bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  HeapRegion* hr = heap_region_containing(p);
  return (hr == NULL) ? is_in_permanent(p) : hr->is_in(p);
}
#endif // ASSERT
5442 | |
5443 class VerifyRegionListsClosure : public HeapRegionClosure { | |
5444 private: | |
5445 HumongousRegionSet* _humongous_set; | |
5446 FreeRegionList* _free_list; | |
5447 size_t _region_count; | |
5448 | |
5449 public: | |
5450 VerifyRegionListsClosure(HumongousRegionSet* humongous_set, | |
5451 FreeRegionList* free_list) : | |
5452 _humongous_set(humongous_set), _free_list(free_list), | |
5453 _region_count(0) { } | |
5454 | |
5455 size_t region_count() { return _region_count; } | |
5456 | |
5457 bool doHeapRegion(HeapRegion* hr) { | |
5458 _region_count += 1; | |
5459 | |
5460 if (hr->continuesHumongous()) { | |
5461 return false; | |
5462 } | |
5463 | |
5464 if (hr->is_young()) { | |
5465 // TODO | |
5466 } else if (hr->startsHumongous()) { | |
5467 _humongous_set->verify_next_region(hr); | |
5468 } else if (hr->is_empty()) { | |
5469 _free_list->verify_next_region(hr); | |
5470 } | |
5471 return false; | |
5472 } | |
5473 }; | |
5474 | |
// Sanity-checks the region sets/lists (_free_list,
// _secondary_free_list, _humongous_set) internally and then against
// the regions actually present in the heap. Must be called with the
// heap locked or at a safepoint by the VM thread.
void G1CollectedHeap::verify_region_sets() {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  // First, check the explicit lists.
  _free_list.verify();
  {
    // Given that a concurrent operation might be adding regions to
    // the secondary free list we have to take the lock before
    // verifying it.
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    _secondary_free_list.verify();
  }
  _humongous_set.verify();

  // If a concurrent region freeing operation is in progress it will
  // be difficult to correctly attribute any free regions we come
  // across to the correct free list given that they might belong to
  // one of several (free_list, secondary_free_list, any local lists,
  // etc.). So, if that's the case we will skip the rest of the
  // verification operation. Alternatively, waiting for the concurrent
  // operation to complete will have a non-trivial effect on the GC's
  // operation (no concurrent operation will last longer than the
  // interval between two calls to verification) and it might hide
  // any issues that we would like to catch during testing.
  if (free_regions_coming()) {
    return;
  }

  {
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    // Make sure we append the secondary_free_list on the free_list so
    // that all free regions we will come across can be safely
    // attributed to the free_list.
    append_secondary_free_list();
  }

  // Finally, make sure that the region accounting in the lists is
  // consistent with what we see in the heap.
  _humongous_set.verify_start();
  _free_list.verify_start();

  VerifyRegionListsClosure cl(&_humongous_set, &_free_list);
  heap_region_iterate(&cl);

  _humongous_set.verify_end();
  _free_list.verify_end();
}