Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 1973:631f79e71e90
6974966: G1: unnecessary direct-to-old allocations
Summary: This change revamps the slow allocation path of G1. Improvements include the following: a) Allocations directly to old regions are now totally banned. G1 now only allows allocations out of young regions (with the only exception being humongous regions). b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards that correspond to the "block" that just got allocated. c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab(). d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it will perform the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC. e) If a thread is unable to allocate a humongous object it will schedule an evacuation pause in case it reclaims enough regions so that the humongous allocation can be satisfied afterwards. f) The G1 policy is more careful to set the young list target length to be the survivor number +1. g) Lots of code tidy up, removal, refactoring to make future changes easier.
Reviewed-by: johnc, ysr
author | tonyp |
---|---|
date | Tue, 24 Aug 2010 17:24:33 -0400 |
parents | f95d63e2154a |
children | fd1d227ef1b9 |
rev | line source |
---|---|
342 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1547
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "code/icBuffer.hpp" | |
27 #include "gc_implementation/g1/bufferingOopClosure.hpp" | |
28 #include "gc_implementation/g1/concurrentG1Refine.hpp" | |
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" | |
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" | |
31 #include "gc_implementation/g1/concurrentZFThread.hpp" | |
32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" | |
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | |
34 #include "gc_implementation/g1/g1MarkSweep.hpp" | |
35 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | |
36 #include "gc_implementation/g1/g1RemSet.inline.hpp" | |
37 #include "gc_implementation/g1/heapRegionRemSet.hpp" | |
38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | |
39 #include "gc_implementation/g1/vm_operations_g1.hpp" | |
40 #include "gc_implementation/shared/isGCActiveMark.hpp" | |
41 #include "memory/gcLocker.inline.hpp" | |
42 #include "memory/genOopClosures.inline.hpp" | |
43 #include "memory/generationSpec.hpp" | |
44 #include "oops/oop.inline.hpp" | |
45 #include "oops/oop.pcgc.inline.hpp" | |
46 #include "runtime/aprofiler.hpp" | |
47 #include "runtime/vmThread.hpp" | |
342 | 48 |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
49 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
50 |
342 | 51 // turn it on so that the contents of the young list (scan-only / |
52 // to-be-collected) are printed at "strategic" points before / during | |
53 // / after the collection --- this is useful for debugging | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
54 #define YOUNG_LIST_VERBOSE 0 |
342 | 55 // CURRENT STATUS |
56 // This file is under construction. Search for "FIXME". | |
57 | |
58 // INVARIANTS/NOTES | |
59 // | |
60 // All allocation activity covered by the G1CollectedHeap interface is | |
1973 | 61 // serialized by acquiring the HeapLock. This happens in mem_allocate |
62 // and allocate_new_tlab, which are the "entry" points to the | |
63 // allocation code from the rest of the JVM. (Note that this does not | |
64 // apply to TLAB allocation, which is not part of this interface: it | |
65 // is done by clients of this interface.) | |
342 | 66 |
67 // Local to this file. | |
68 | |
69 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
70 SuspendibleThreadSet* _sts; | |
71 G1RemSet* _g1rs; | |
72 ConcurrentG1Refine* _cg1r; | |
73 bool _concurrent; | |
74 public: | |
75 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
76 G1RemSet* g1rs, | |
77 ConcurrentG1Refine* cg1r) : | |
78 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
79 {} | |
80 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
1705 | 81 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); |
82 // This path is executed by the concurrent refine or mutator threads, | |
83 // concurrently, and so we do not care if card_ptr contains references | |
84 // that point into the collection set. | |
85 assert(!oops_into_cset, "should be"); | |
86 | |
342 | 87 if (_concurrent && _sts->should_yield()) { |
88 // Caller will actually yield. | |
89 return false; | |
90 } | |
91 // Otherwise, we finished successfully; return true. | |
92 return true; | |
93 } | |
94 void set_concurrent(bool b) { _concurrent = b; } | |
95 }; | |
96 | |
97 | |
98 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
99 int _calls; | |
100 G1CollectedHeap* _g1h; | |
101 CardTableModRefBS* _ctbs; | |
102 int _histo[256]; | |
103 public: | |
104 ClearLoggedCardTableEntryClosure() : | |
105 _calls(0) | |
106 { | |
107 _g1h = G1CollectedHeap::heap(); | |
108 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
109 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
110 } | |
111 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
112 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
113 _calls++; | |
114 unsigned char* ujb = (unsigned char*)card_ptr; | |
115 int ind = (int)(*ujb); | |
116 _histo[ind]++; | |
117 *card_ptr = -1; | |
118 } | |
119 return true; | |
120 } | |
121 int calls() { return _calls; } | |
122 void print_histo() { | |
123 gclog_or_tty->print_cr("Card table value histogram:"); | |
124 for (int i = 0; i < 256; i++) { | |
125 if (_histo[i] != 0) { | |
126 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
127 } | |
128 } | |
129 } | |
130 }; | |
131 | |
132 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
133 int _calls; | |
134 G1CollectedHeap* _g1h; | |
135 CardTableModRefBS* _ctbs; | |
136 public: | |
137 RedirtyLoggedCardTableEntryClosure() : | |
138 _calls(0) | |
139 { | |
140 _g1h = G1CollectedHeap::heap(); | |
141 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
142 } | |
143 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
144 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
145 _calls++; | |
146 *card_ptr = 0; | |
147 } | |
148 return true; | |
149 } | |
150 int calls() { return _calls; } | |
151 }; | |
152 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
153 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
154 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
155 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
156 *card_ptr = CardTableModRefBS::dirty_card_val(); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
157 return true; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
158 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
159 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
160 |
// Construct an empty young list for the given heap. Both the eden list
// (_head) and the survivor list (_survivor_head/_survivor_tail) start out
// empty; the guarantee double-checks that invariant (without checking the
// sampled RS lengths, hence the 'false' argument).
YoungList::YoungList(G1CollectedHeap* g1h)
  : _g1h(g1h), _head(NULL),
    _length(0),
    _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
{
  guarantee( check_list_empty(false), "just making sure..." );
}
169 | |
170 void YoungList::push_region(HeapRegion *hr) { | |
171 assert(!hr->is_young(), "should not already be young"); | |
172 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
173 | |
174 hr->set_next_young_region(_head); | |
175 _head = hr; | |
176 | |
177 hr->set_young(); | |
178 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
179 ++_length; | |
180 } | |
181 | |
182 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 183 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 184 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
185 | |
186 hr->set_next_young_region(_survivor_head); | |
187 if (_survivor_head == NULL) { | |
545 | 188 _survivor_tail = hr; |
342 | 189 } |
190 _survivor_head = hr; | |
191 | |
192 ++_survivor_length; | |
193 } | |
194 | |
195 void YoungList::empty_list(HeapRegion* list) { | |
196 while (list != NULL) { | |
197 HeapRegion* next = list->get_next_young_region(); | |
198 list->set_next_young_region(NULL); | |
199 list->uninstall_surv_rate_group(); | |
200 list->set_not_young(); | |
201 list = next; | |
202 } | |
203 } | |
204 | |
205 void YoungList::empty_list() { | |
206 assert(check_list_well_formed(), "young list should be well formed"); | |
207 | |
208 empty_list(_head); | |
209 _head = NULL; | |
210 _length = 0; | |
211 | |
212 empty_list(_survivor_head); | |
213 _survivor_head = NULL; | |
545 | 214 _survivor_tail = NULL; |
342 | 215 _survivor_length = 0; |
216 | |
217 _last_sampled_rs_lengths = 0; | |
218 | |
219 assert(check_list_empty(false), "just making sure..."); | |
220 } | |
221 | |
222 bool YoungList::check_list_well_formed() { | |
223 bool ret = true; | |
224 | |
225 size_t length = 0; | |
226 HeapRegion* curr = _head; | |
227 HeapRegion* last = NULL; | |
228 while (curr != NULL) { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
229 if (!curr->is_young()) { |
342 | 230 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
231 "incorrectly tagged (y: %d, surv: %d)", |
342 | 232 curr->bottom(), curr->end(), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
233 curr->is_young(), curr->is_survivor()); |
342 | 234 ret = false; |
235 } | |
236 ++length; | |
237 last = curr; | |
238 curr = curr->get_next_young_region(); | |
239 } | |
240 ret = ret && (length == _length); | |
241 | |
242 if (!ret) { | |
243 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
244 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
245 length, _length); | |
246 } | |
247 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
248 return ret; |
342 | 249 } |
250 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
251 bool YoungList::check_list_empty(bool check_sample) { |
342 | 252 bool ret = true; |
253 | |
254 if (_length != 0) { | |
255 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
256 _length); | |
257 ret = false; | |
258 } | |
259 if (check_sample && _last_sampled_rs_lengths != 0) { | |
260 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
261 ret = false; | |
262 } | |
263 if (_head != NULL) { | |
264 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
265 ret = false; | |
266 } | |
267 if (!ret) { | |
268 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
269 } | |
270 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
271 return ret; |
342 | 272 } |
273 | |
274 void | |
275 YoungList::rs_length_sampling_init() { | |
276 _sampled_rs_lengths = 0; | |
277 _curr = _head; | |
278 } | |
279 | |
280 bool | |
281 YoungList::rs_length_sampling_more() { | |
282 return _curr != NULL; | |
283 } | |
284 | |
// Sample the remembered-set length of the current region, accumulate it
// into _sampled_rs_lengths, and advance the cursor. When the walk reaches
// the end of the list, publish the accumulated total in
// _last_sampled_rs_lengths.
void
YoungList::rs_length_sampling_next() {
  assert( _curr != NULL, "invariant" );
  size_t rs_length = _curr->rem_set()->occupied();

  _sampled_rs_lengths += rs_length;

  // The current region may not yet have been added to the
  // incremental collection set (it gets added when it is
  // retired as the current allocation region).
  if (_curr->in_collection_set()) {
    // Update the collection set policy information for this region
    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
  }

  _curr = _curr->get_next_young_region();
  if (_curr == NULL) {
    // End of the list: publish the total for this sampling pass.
    _last_sampled_rs_lengths = _sampled_rs_lengths;
    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
  }
}
306 | |
// At the end of an evacuation pause, turn the survivor list into the new
// eden list: register each survivor with the surv-rate machinery, add it
// to the incremental collection set for the next pause, then splice the
// survivor list onto _head. The survivor head/tail/length fields are
// deliberately NOT cleared here (see the comment near the end).
void
YoungList::reset_auxilary_lists() {
  guarantee( is_empty(), "young list should be empty" );
  assert(check_list_well_formed(), "young list should be well formed");

  // Add survivor regions to SurvRateGroup.
  _g1h->g1_policy()->note_start_adding_survivor_regions();
  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);

  for (HeapRegion* curr = _survivor_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    _g1h->g1_policy()->set_region_survivors(curr);

    // The region is a non-empty survivor so let's add it to
    // the incremental collection set for the next evacuation
    // pause.
    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
  }
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  // The survivors become the new eden list.
  _head   = _survivor_head;
  _length = _survivor_length;
  if (_survivor_head != NULL) {
    assert(_survivor_tail != NULL, "cause it shouldn't be");
    assert(_survivor_length > 0, "invariant");
    // Terminate the spliced list at the old survivor tail.
    _survivor_tail->set_next_young_region(NULL);
  }

  // Don't clear the survivor list handles until the start of
  // the next evacuation pause - we need it in order to re-tag
  // the survivor regions from this evacuation pause as 'young'
  // at the start of the next.

  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

  assert(check_list_well_formed(), "young list should be well formed");
}
345 | |
// Debug printing: dump the contents of both the eden ("YOUNG") and
// survivor lists, one line per region, showing the region's bounds,
// top, mark-start snapshots, age and tags.
void YoungList::print() {
  HeapRegion* lists[] = {_head,   _survivor_head};
  const char* names[] = {"YOUNG", "SURVIVOR"};

  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
    HeapRegion *curr = lists[list];
    if (curr == NULL)
      gclog_or_tty->print_cr("  empty");
    while (curr != NULL) {
      // NOTE(review): "%08x" truncates 64-bit pointers; PTR_FORMAT would be
      // more appropriate. Left as-is since this is debug-only output.
      gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                             "age: %4d, y: %d, surv: %d",
                             curr->bottom(), curr->end(),
                             curr->top(),
                             curr->prev_top_at_mark_start(),
                             curr->next_top_at_mark_start(),
                             curr->top_at_conc_mark_count(),
                             curr->age_in_surv_rate_group_cond(),
                             curr->is_young(),
                             curr->is_survivor());
      curr = curr->get_next_young_region();
    }
  }

  gclog_or_tty->print_cr("");
}
372 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
// Lock-free push of hr onto the global dirty-cards region list.
// Protocol: a region claims membership by CAS-ing a self-pointer into its
// next_dirty_cards_region field; only the claiming thread then links it
// onto _dirty_cards_region_list. The list is terminated by a region whose
// next pointer is itself (not NULL), so "not on the list" (NULL next) and
// "last element" (self next) remain distinguishable. If another thread
// already claimed the region, this call is a no-op.
void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
{
  // Claim the right to put the region on the dirty cards region list
  // by installing a self pointer.
  HeapRegion* next = hr->get_next_dirty_cards_region();
  if (next == NULL) {
    HeapRegion* res = (HeapRegion*)
      Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
                          NULL);
    if (res == NULL) {
      // We won the claim: link hr at the head of the global list.
      HeapRegion* head;
      do {
        // Put the region to the dirty cards region list.
        head = _dirty_cards_region_list;
        next = (HeapRegion*)
          Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
        if (next == head) {
          assert(hr->get_next_dirty_cards_region() == hr,
                 "hr->get_next_dirty_cards_region() != hr");
          if (next == NULL) {
            // The last region in the list points to itself.
            hr->set_next_dirty_cards_region(hr);
          } else {
            hr->set_next_dirty_cards_region(next);
          }
        }
      } while (next != head);
    }
  }
}
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
403 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
// Lock-free pop of the head of the global dirty-cards region list.
// Returns NULL when the list is empty. A region whose next pointer equals
// itself marks the end of the list; the popped region's next pointer is
// reset to NULL so it can be pushed again later.
HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
{
  HeapRegion* head;
  HeapRegion* hr;
  do {
    head = _dirty_cards_region_list;
    if (head == NULL) {
      return NULL;
    }
    HeapRegion* new_head = head->get_next_dirty_cards_region();
    if (head == new_head) {
      // The last region.
      new_head = NULL;
    }
    // Retry until we observe the same head we based new_head on.
    hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
                                          head);
  } while (hr != head);
  assert(hr != NULL, "invariant");
  hr->set_next_dirty_cards_region(NULL);
  return hr;
}
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
425 |
// Stop all concurrent GC worker threads: concurrent refinement (_cg1r),
// the concurrent zero-fill thread (_czft) and the concurrent mark
// thread (_cmThread).
void G1CollectedHeap::stop_conc_gc_threads() {
  _cg1r->stop();
  _czft->stop();
  _cmThread->stop();
}
431 | |
432 | |
// Debug-only consistency check, run at a safepoint: verifies that the
// dirty card queue (DCQ) logs and the card table agree. It (1) counts
// the dirty cards, (2) clears every logged card and checks the card
// table is then completely clean, (3) re-dirties the logged cards and
// checks the original dirty count is restored, and (4) re-installs the
// normal refinement closure on the queue set.
void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there's no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  // Re-dirty every card that was logged; the number re-dirtied must match
  // the number cleared, or the logging mechanism is broken.
  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  // Restore the normal refinement closure.
  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}
477 | |
478 // Private class members. | |
479 | |
480 G1CollectedHeap* G1CollectedHeap::_g1h; | |
481 | |
482 // Private methods. | |
483 | |
484 // Finds a HeapRegion that can be used to allocate a given size of block. | |
485 | |
486 | |
487 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, | |
488 bool do_expand, | |
489 bool zero_filled) { | |
490 ConcurrentZFThread::note_region_alloc(); | |
491 HeapRegion* res = alloc_free_region_from_lists(zero_filled); | |
492 if (res == NULL && do_expand) { | |
493 expand(word_size * HeapWordSize); | |
494 res = alloc_free_region_from_lists(zero_filled); | |
495 assert(res == NULL || | |
496 (!res->isHumongous() && | |
497 (!zero_filled || | |
498 res->zero_fill_state() == HeapRegion::Allocated)), | |
499 "Alloc Regions must be zero filled (and non-H)"); | |
500 } | |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
501 if (res != NULL) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
502 if (res->is_empty()) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
503 _free_regions--; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
504 } |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
505 assert(!res->isHumongous() && |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
506 (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated), |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
507 err_msg("Non-young alloc Regions must be zero filled (and non-H):" |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
508 " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d", |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
509 res->isHumongous(), zero_filled, res->zero_fill_state())); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
510 assert(!res->is_on_unclean_list(), |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
511 "Alloc Regions must not be on the unclean list"); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
512 if (G1PrintHeapRegions) { |
342 | 513 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " |
514 "top "PTR_FORMAT, | |
515 res->hrs_index(), res->bottom(), res->end(), res->top()); | |
516 } | |
517 } | |
518 return res; | |
519 } | |
520 | |
521 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, | |
522 size_t word_size, | |
523 bool zero_filled) { | |
524 HeapRegion* alloc_region = NULL; | |
525 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
526 alloc_region = newAllocRegion_work(word_size, true, zero_filled); | |
527 if (purpose == GCAllocForSurvived && alloc_region != NULL) { | |
545 | 528 alloc_region->set_survivor(); |
342 | 529 } |
530 ++_gc_alloc_region_counts[purpose]; | |
531 } else { | |
532 g1_policy()->note_alloc_region_limit_reached(purpose); | |
533 } | |
534 return alloc_region; | |
535 } | |
536 | |
537 // If could fit into free regions w/o expansion, try. | |
538 // Otherwise, if can expand, do so. | |
539 // Otherwise, if using ex regions might help, try with ex given back. | |
1973 | 540 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { |
541 assert_heap_locked_or_at_safepoint(); | |
342 | 542 assert(regions_accounted_for(), "Region leakage!"); |
543 | |
1973 | 544 // We can't allocate humongous regions while cleanupComplete is |
545 // running, since some of the regions we find to be empty might not | |
546 // yet be added to the unclean list. If we're already at a | |
547 // safepoint, this call is unnecessary, not to mention wrong. | |
548 if (!SafepointSynchronize::is_at_safepoint()) { | |
342 | 549 wait_for_cleanup_complete(); |
1973 | 550 } |
342 | 551 |
552 size_t num_regions = | |
1973 | 553 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; |
342 | 554 |
555 // Special case if < one region??? | |
556 | |
557 // Remember the ft size. | |
558 size_t x_size = expansion_regions(); | |
559 | |
560 HeapWord* res = NULL; | |
561 bool eliminated_allocated_from_lists = false; | |
562 | |
563 // Can the allocation potentially fit in the free regions? | |
564 if (free_regions() >= num_regions) { | |
565 res = _hrs->obj_allocate(word_size); | |
566 } | |
567 if (res == NULL) { | |
568 // Try expansion. | |
569 size_t fs = _hrs->free_suffix(); | |
570 if (fs + x_size >= num_regions) { | |
571 expand((num_regions - fs) * HeapRegion::GrainBytes); | |
572 res = _hrs->obj_allocate(word_size); | |
573 assert(res != NULL, "This should have worked."); | |
574 } else { | |
575 // Expansion won't help. Are there enough free regions if we get rid | |
576 // of reservations? | |
577 size_t avail = free_regions(); | |
578 if (avail >= num_regions) { | |
579 res = _hrs->obj_allocate(word_size); | |
580 if (res != NULL) { | |
581 remove_allocated_regions_from_lists(); | |
582 eliminated_allocated_from_lists = true; | |
583 } | |
584 } | |
585 } | |
586 } | |
587 if (res != NULL) { | |
588 // Increment by the number of regions allocated. | |
589 // FIXME: Assumes regions all of size GrainBytes. | |
590 #ifndef PRODUCT | |
591 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * | |
592 HeapRegion::GrainWords)); | |
593 #endif | |
594 if (!eliminated_allocated_from_lists) | |
595 remove_allocated_regions_from_lists(); | |
596 _summary_bytes_used += word_size * HeapWordSize; | |
597 _free_regions -= num_regions; | |
598 _num_humongous_regions += (int) num_regions; | |
599 } | |
600 assert(regions_accounted_for(), "Region Leakage"); | |
601 return res; | |
602 } | |
603 | |
1973 | 604 void |
605 G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) { | |
606 // The cleanup operation might update _summary_bytes_used | |
607 // concurrently with this method. So, right now, if we don't wait | |
608 // for it to complete, updates to _summary_bytes_used might get | |
609 // lost. This will be resolved in the near future when the operation | |
610 // of the free region list is revamped as part of CR 6977804. | |
611 wait_for_cleanup_complete(); | |
612 | |
613 retire_cur_alloc_region_common(cur_alloc_region); | |
614 assert(_cur_alloc_region == NULL, "post-condition"); | |
615 } | |
616 | |
617 // See the comment in the .hpp file about the locking protocol and | |
618 // assumptions of this method (and other related ones). | |
342 | 619 HeapWord* |
1973 | 620 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size, |
621 bool at_safepoint, | |
622 bool do_dirtying) { | |
623 assert_heap_locked_or_at_safepoint(); | |
624 assert(_cur_alloc_region == NULL, | |
625 "replace_cur_alloc_region_and_allocate() should only be called " | |
626 "after retiring the previous current alloc region"); | |
627 assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, | |
628 "at_safepoint and is_at_safepoint() should be a tautology"); | |
629 | |
630 if (!g1_policy()->is_young_list_full()) { | |
631 if (!at_safepoint) { | |
632 // The cleanup operation might update _summary_bytes_used | |
633 // concurrently with this method. So, right now, if we don't | |
634 // wait for it to complete, updates to _summary_bytes_used might | |
635 // get lost. This will be resolved in the near future when the | |
636 // operation of the free region list is revamped as part of | |
637 // CR 6977804. If we're already at a safepoint, this call is | |
638 // unnecessary, not to mention wrong. | |
354
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
639 wait_for_cleanup_complete(); |
342 | 640 } |
1973 | 641 |
642 HeapRegion* new_cur_alloc_region = newAllocRegion(word_size, | |
643 false /* zero_filled */); | |
644 if (new_cur_alloc_region != NULL) { | |
645 assert(new_cur_alloc_region->is_empty(), | |
646 "the newly-allocated region should be empty, " | |
647 "as right now we only allocate new regions out of the free list"); | |
648 g1_policy()->update_region_num(true /* next_is_young */); | |
649 _summary_bytes_used -= new_cur_alloc_region->used(); | |
650 set_region_short_lived_locked(new_cur_alloc_region); | |
651 | |
652 assert(!new_cur_alloc_region->isHumongous(), | |
653 "Catch a regression of this bug."); | |
654 | |
655 // We need to ensure that the stores to _cur_alloc_region and, | |
656 // subsequently, to top do not float above the setting of the | |
657 // young type. | |
658 OrderAccess::storestore(); | |
659 | |
660 // Now allocate out of the new current alloc region. We could | |
661 // have re-used allocate_from_cur_alloc_region() but its | |
662 // operation is slightly different to what we need here. First, | |
663 // allocate_from_cur_alloc_region() is only called outside a | |
664 // safepoint and will always unlock the Heap_lock if it returns | |
665 // a non-NULL result. Second, it assumes that the current alloc | |
666 // region is what's already assigned in _cur_alloc_region. What | |
667 // we want here is to actually do the allocation first before we | |
668 // assign the new region to _cur_alloc_region. This ordering is | |
669 // not currently important, but it will be essential when we | |
670 // change the code to support CAS allocation in the future (see | |
671 // CR 6994297). | |
672 // | |
673 // This allocate method does BOT updates and we don't need them in | |
674 // the young generation. This will be fixed in the near future by | |
675 // CR 6994297. | |
676 HeapWord* result = new_cur_alloc_region->allocate(word_size); | |
677 assert(result != NULL, "we just allocate out of an empty region " | |
678 "so allocation should have been successful"); | |
679 assert(is_in(result), "result should be in the heap"); | |
680 | |
681 _cur_alloc_region = new_cur_alloc_region; | |
682 | |
683 if (!at_safepoint) { | |
684 Heap_lock->unlock(); | |
685 } | |
686 | |
687 // do the dirtying, if necessary, after we release the Heap_lock | |
688 if (do_dirtying) { | |
689 dirty_young_block(result, word_size); | |
690 } | |
691 return result; | |
692 } | |
693 } | |
694 | |
695 assert(_cur_alloc_region == NULL, "we failed to allocate a new current " | |
696 "alloc region, it should still be NULL"); | |
697 assert_heap_locked_or_at_safepoint(); | |
698 return NULL; | |
699 } | |
700 | |
701 // See the comment in the .hpp file about the locking protocol and | |
702 // assumptions of this method (and other related ones). | |
703 HeapWord* | |
704 G1CollectedHeap::attempt_allocation_slow(size_t word_size) { | |
705 assert_heap_locked_and_not_at_safepoint(); | |
706 assert(!isHumongous(word_size), "attempt_allocation_slow() should not be " | |
707 "used for humongous allocations"); | |
708 | |
709 // We will loop while succeeded is false, which means that we tried | |
710 // to do a collection, but the VM op did not succeed. So, when we | |
711 // exit the loop, either one of the allocation attempts was | |
712 // successful, or we succeeded in doing the VM op but which was | |
713 // unable to allocate after the collection. | |
714 for (int try_count = 1; /* we'll return or break */; try_count += 1) { | |
715 bool succeeded = true; | |
716 | |
717 { | |
718 // We may have concurrent cleanup working at the time. Wait for | |
719 // it to complete. In the future we would probably want to make | |
720 // the concurrent cleanup truly concurrent by decoupling it from | |
721 // the allocation. This will happen in the near future as part | |
722 // of CR 6977804 which will revamp the operation of the free | |
723 // region list. The fact that wait_for_cleanup_complete() will | |
724 // do a wait() means that we'll give up the Heap_lock. So, it's | |
725 // possible that when we exit wait_for_cleanup_complete() we | |
726 // might be able to allocate successfully (since somebody else | |
727 // might have done a collection meanwhile). So, we'll attempt to | |
728 // allocate again, just in case. When we make cleanup truly | |
729 // concurrent with allocation, we should remove this allocation | |
730 // attempt as it's redundant (we only reach here after an | |
731 // allocation attempt has been unsuccessful). | |
732 wait_for_cleanup_complete(); | |
733 HeapWord* result = attempt_allocation(word_size); | |
734 if (result != NULL) { | |
735 assert_heap_not_locked(); | |
736 return result; | |
342 | 737 } |
738 } | |
1973 | 739 |
740 if (GC_locker::is_active_and_needs_gc()) { | |
741 // We are locked out of GC because of the GC locker. Right now, | |
742 // we'll just stall until the GC locker-induced GC | |
743 // completes. This will be fixed in the near future by extending | |
744 // the eden while waiting for the GC locker to schedule the GC | |
745 // (see CR 6994056). | |
746 | |
747 // If this thread is not in a jni critical section, we stall | |
748 // the requestor until the critical section has cleared and | |
749 // GC allowed. When the critical section clears, a GC is | |
750 // initiated by the last thread exiting the critical section; so | |
751 // we retry the allocation sequence from the beginning of the loop, | |
752 // rather than causing more, now probably unnecessary, GC attempts. | |
753 JavaThread* jthr = JavaThread::current(); | |
754 assert(jthr != NULL, "sanity"); | |
755 if (!jthr->in_critical()) { | |
756 MutexUnlocker mul(Heap_lock); | |
757 GC_locker::stall_until_clear(); | |
758 | |
759 // We'll then fall off the end of the ("if GC locker active") | |
760 // if-statement and retry the allocation further down in the | |
761 // loop. | |
762 } else { | |
763 if (CheckJNICalls) { | |
764 fatal("Possible deadlock due to allocating while" | |
765 " in jni critical section"); | |
766 } | |
767 return NULL; | |
1666
5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
johnc
parents:
1656
diff
changeset
|
768 } |
1973 | 769 } else { |
770 // We are not locked out. So, let's try to do a GC. The VM op | |
771 // will retry the allocation before it completes. | |
772 | |
773 // Read the GC count while holding the Heap_lock | |
774 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); | |
775 | |
776 Heap_lock->unlock(); | |
777 | |
778 HeapWord* result = | |
779 do_collection_pause(word_size, gc_count_before, &succeeded); | |
780 assert_heap_not_locked(); | |
781 if (result != NULL) { | |
782 assert(succeeded, "the VM op should have succeeded"); | |
783 | |
784 // Allocations that take place on VM operations do not do any | |
785 // card dirtying and we have to do it here. | |
786 dirty_young_block(result, word_size); | |
787 return result; | |
788 } | |
789 | |
790 Heap_lock->lock(); | |
791 } | |
792 | |
793 assert_heap_locked(); | |
794 | |
795 // We can reach here when we were unsuccessful in doing a GC, | |
796 // because another thread beat us to it, or because we were locked | |
797 // out of GC due to the GC locker. In either case a new alloc | |
798 // region might be available so we will retry the allocation. | |
799 HeapWord* result = attempt_allocation(word_size); | |
800 if (result != NULL) { | |
801 assert_heap_not_locked(); | |
802 return result; | |
803 } | |
804 | |
805 // So far our attempts to allocate failed. The only time we'll go | |
806 // around the loop and try again is if we tried to do a GC and the | |
807 // VM op that we tried to schedule was not successful because | |
808 // another thread beat us to it. If that happened it's possible | |
809 // that by the time we grabbed the Heap_lock again and tried to | |
810 // allocate other threads filled up the young generation, which | |
811 // means that the allocation attempt after the GC also failed. So, | |
812 // it's worth trying to schedule another GC pause. | |
813 if (succeeded) { | |
814 break; | |
815 } | |
816 | |
817 // Give a warning if we seem to be looping forever. | |
818 if ((QueuedAllocationWarningCount > 0) && | |
819 (try_count % QueuedAllocationWarningCount == 0)) { | |
820 warning("G1CollectedHeap::attempt_allocation_slow() " | |
821 "retries %d times", try_count); | |
342 | 822 } |
823 } | |
824 | |
1973 | 825 assert_heap_locked(); |
826 return NULL; | |
827 } | |
828 | |
829 // See the comment in the .hpp file about the locking protocol and | |
830 // assumptions of this method (and other related ones). | |
831 HeapWord* | |
832 G1CollectedHeap::attempt_allocation_humongous(size_t word_size, | |
833 bool at_safepoint) { | |
834 // This is the method that will allocate a humongous object. All | |
835 // allocation paths that attempt to allocate a humongous object | |
836 // should eventually reach here. Currently, the only paths are from | |
837 // mem_allocate() and attempt_allocation_at_safepoint(). | |
838 assert_heap_locked_or_at_safepoint(); | |
839 assert(isHumongous(word_size), "attempt_allocation_humongous() " | |
840 "should only be used for humongous allocations"); | |
841 assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, | |
842 "at_safepoint and is_at_safepoint() should be a tautology"); | |
843 | |
844 HeapWord* result = NULL; | |
845 | |
846 // We will loop while succeeded is false, which means that we tried | |
847 // to do a collection, but the VM op did not succeed. So, when we | |
848 // exit the loop, either one of the allocation attempts was | |
849 // successful, or we succeeded in doing the VM op but which was | |
850 // unable to allocate after the collection. | |
851 for (int try_count = 1; /* we'll return or break */; try_count += 1) { | |
852 bool succeeded = true; | |
853 | |
854 // Given that humongous objects are not allocated in young | |
855 // regions, we'll first try to do the allocation without doing a | |
856 // collection hoping that there's enough space in the heap. | |
857 result = humongous_obj_allocate(word_size); | |
858 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), | |
859 "catch a regression of this bug."); | |
860 if (result != NULL) { | |
861 if (!at_safepoint) { | |
862 // If we're not at a safepoint, unlock the Heap_lock. | |
863 Heap_lock->unlock(); | |
864 } | |
865 return result; | |
866 } | |
867 | |
868 // If we failed to allocate the humongous object, we should try to | |
869 // do a collection pause (if we're allowed) in case it reclaims | |
870 // enough space for the allocation to succeed after the pause. | |
871 if (!at_safepoint) { | |
872 // Read the GC count while holding the Heap_lock | |
873 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); | |
874 | |
875 // If we're allowed to do a collection we're not at a | |
876 // safepoint, so it is safe to unlock the Heap_lock. | |
342 | 877 Heap_lock->unlock(); |
1973 | 878 |
879 result = do_collection_pause(word_size, gc_count_before, &succeeded); | |
880 assert_heap_not_locked(); | |
881 if (result != NULL) { | |
882 assert(succeeded, "the VM op should have succeeded"); | |
883 return result; | |
884 } | |
885 | |
886 // If we get here, the VM operation either did not succeed | |
887 // (i.e., another thread beat us to it) or it succeeded but | |
888 // failed to allocate the object. | |
889 | |
890 // If we're allowed to do a collection we're not at a | |
891 // safepoint, so it is safe to lock the Heap_lock. | |
892 Heap_lock->lock(); | |
893 } | |
894 | |
895 assert(result == NULL, "otherwise we should have exited the loop earlier"); | |
896 | |
897 // So far our attempts to allocate failed. The only time we'll go | |
898 // around the loop and try again is if we tried to do a GC and the | |
899 // VM op that we tried to schedule was not successful because | |
900 // another thread beat us to it. That way it's possible that some | |
901 // space was freed up by the thread that successfully scheduled a | |
902 // GC. So it's worth trying to allocate again. | |
903 if (succeeded) { | |
904 break; | |
342 | 905 } |
906 | |
1973 | 907 // Give a warning if we seem to be looping forever. |
908 if ((QueuedAllocationWarningCount > 0) && | |
909 (try_count % QueuedAllocationWarningCount == 0)) { | |
910 warning("G1CollectedHeap::attempt_allocation_humongous " | |
911 "retries %d times", try_count); | |
912 } | |
913 } | |
914 | |
915 assert_heap_locked_or_at_safepoint(); | |
916 return NULL; | |
917 } | |
918 | |
919 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, | |
920 bool expect_null_cur_alloc_region) { | |
921 assert_at_safepoint(); | |
922 assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region, | |
923 "The current alloc region should only be non-NULL if we're " | |
924 "expecting it not to be NULL"); | |
925 | |
926 if (!isHumongous(word_size)) { | |
927 if (!expect_null_cur_alloc_region) { | |
928 HeapRegion* cur_alloc_region = _cur_alloc_region; | |
929 if (cur_alloc_region != NULL) { | |
930 // This allocate method does BOT updates and we don't need them in | |
931 // the young generation. This will be fixed in the near future by | |
932 // CR 6994297. | |
933 HeapWord* result = cur_alloc_region->allocate(word_size); | |
934 if (result != NULL) { | |
935 assert(is_in(result), "result should be in the heap"); | |
936 | |
937 // We will not do any dirtying here. This is guaranteed to be | |
938 // called during a safepoint and the thread that scheduled the | |
939 // pause will do the dirtying if we return a non-NULL result. | |
940 return result; | |
941 } | |
942 | |
943 retire_cur_alloc_region_common(cur_alloc_region); | |
944 } | |
342 | 945 } |
1973 | 946 |
947 assert(_cur_alloc_region == NULL, | |
948 "at this point we should have no cur alloc region"); | |
949 return replace_cur_alloc_region_and_allocate(word_size, | |
950 true, /* at_safepoint */ | |
951 false /* do_dirtying */); | |
952 } else { | |
953 return attempt_allocation_humongous(word_size, | |
954 true /* at_safepoint */); | |
955 } | |
956 | |
957 ShouldNotReachHere(); | |
958 } | |
959 | |
960 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { | |
961 assert_heap_not_locked_and_not_at_safepoint(); | |
962 assert(!isHumongous(word_size), "we do not allow TLABs of humongous size"); | |
963 | |
964 Heap_lock->lock(); | |
965 | |
966 // First attempt: try allocating out of the current alloc region or | |
967 // after replacing the current alloc region. | |
968 HeapWord* result = attempt_allocation(word_size); | |
969 if (result != NULL) { | |
970 assert_heap_not_locked(); | |
971 return result; | |
972 } | |
973 | |
974 assert_heap_locked(); | |
975 | |
976 // Second attempt: go into the even slower path where we might | |
977 // try to schedule a collection. | |
978 result = attempt_allocation_slow(word_size); | |
979 if (result != NULL) { | |
980 assert_heap_not_locked(); | |
981 return result; | |
982 } | |
983 | |
984 assert_heap_locked(); | |
985 Heap_lock->unlock(); | |
986 return NULL; | |
342 | 987 } |
988 | |
989 HeapWord* | |
990 G1CollectedHeap::mem_allocate(size_t word_size, | |
991 bool is_noref, | |
992 bool is_tlab, | |
1973 | 993 bool* gc_overhead_limit_was_exceeded) { |
994 assert_heap_not_locked_and_not_at_safepoint(); | |
995 assert(!is_tlab, "mem_allocate() this should not be called directly " | |
996 "to allocate TLABs"); | |
342 | 997 |
998 // Loop until the allocation is satisified, | |
999 // or unsatisfied after GC. | |
1973 | 1000 for (int try_count = 1; /* we'll return */; try_count += 1) { |
1001 unsigned int gc_count_before; | |
342 | 1002 { |
1003 Heap_lock->lock(); | |
1973 | 1004 |
1005 if (!isHumongous(word_size)) { | |
1006 // First attempt: try allocating out of the current alloc | |
1007 // region or after replacing the current alloc region. | |
1008 HeapWord* result = attempt_allocation(word_size); | |
1009 if (result != NULL) { | |
1010 assert_heap_not_locked(); | |
1011 return result; | |
1012 } | |
1013 | |
1014 assert_heap_locked(); | |
1015 | |
1016 // Second attempt: go into the even slower path where we might | |
1017 // try to schedule a collection. | |
1018 result = attempt_allocation_slow(word_size); | |
1019 if (result != NULL) { | |
1020 assert_heap_not_locked(); | |
1021 return result; | |
1022 } | |
1023 } else { | |
1024 HeapWord* result = attempt_allocation_humongous(word_size, | |
1025 false /* at_safepoint */); | |
1026 if (result != NULL) { | |
1027 assert_heap_not_locked(); | |
1028 return result; | |
1029 } | |
342 | 1030 } |
1973 | 1031 |
1032 assert_heap_locked(); | |
342 | 1033 // Read the gc count while the heap lock is held. |
1034 gc_count_before = SharedHeap::heap()->total_collections(); | |
1973 | 1035 // We cannot be at a safepoint, so it is safe to unlock the Heap_lock |
342 | 1036 Heap_lock->unlock(); |
1037 } | |
1038 | |
1039 // Create the garbage collection operation... | |
1973 | 1040 VM_G1CollectForAllocation op(gc_count_before, word_size); |
342 | 1041 // ...and get the VM thread to execute it. |
1042 VMThread::execute(&op); | |
1973 | 1043 |
1044 assert_heap_not_locked(); | |
1045 if (op.prologue_succeeded() && op.pause_succeeded()) { | |
1046 // If the operation was successful we'll return the result even | |
1047 // if it is NULL. If the allocation attempt failed immediately | |
1048 // after a Full GC, it's unlikely we'll be able to allocate now. | |
1049 HeapWord* result = op.result(); | |
1050 if (result != NULL && !isHumongous(word_size)) { | |
1051 // Allocations that take place on VM operations do not do any | |
1052 // card dirtying and we have to do it here. We only have to do | |
1053 // this for non-humongous allocations, though. | |
1054 dirty_young_block(result, word_size); | |
1055 } | |
342 | 1056 return result; |
1973 | 1057 } else { |
1058 assert(op.result() == NULL, | |
1059 "the result should be NULL if the VM op did not succeed"); | |
342 | 1060 } |
1061 | |
1062 // Give a warning if we seem to be looping forever. | |
1063 if ((QueuedAllocationWarningCount > 0) && | |
1064 (try_count % QueuedAllocationWarningCount == 0)) { | |
1973 | 1065 warning("G1CollectedHeap::mem_allocate retries %d times", try_count); |
342 | 1066 } |
1067 } | |
1973 | 1068 |
1069 ShouldNotReachHere(); | |
342 | 1070 } |
1071 | |
1072 void G1CollectedHeap::abandon_cur_alloc_region() { | |
1073 if (_cur_alloc_region != NULL) { | |
1074 // We're finished with the _cur_alloc_region. | |
1075 if (_cur_alloc_region->is_empty()) { | |
1076 _free_regions++; | |
1077 free_region(_cur_alloc_region); | |
1078 } else { | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1079 // As we're builing (at least the young portion) of the collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1080 // set incrementally we'll add the current allocation region to |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1081 // the collection set here. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1082 if (_cur_alloc_region->is_young()) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1083 g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1084 } |
342 | 1085 _summary_bytes_used += _cur_alloc_region->used(); |
1086 } | |
1087 _cur_alloc_region = NULL; | |
1088 } | |
1089 } | |
1090 | |
636 | 1091 void G1CollectedHeap::abandon_gc_alloc_regions() { |
1092 // first, make sure that the GC alloc region list is empty (it should!) | |
1093 assert(_gc_alloc_region_list == NULL, "invariant"); | |
1094 release_gc_alloc_regions(true /* totally */); | |
1095 } | |
1096 | |
342 | 1097 class PostMCRemSetClearClosure: public HeapRegionClosure { |
1098 ModRefBarrierSet* _mr_bs; | |
1099 public: | |
1100 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1101 bool doHeapRegion(HeapRegion* r) { | |
1102 r->reset_gc_time_stamp(); | |
1103 if (r->continuesHumongous()) | |
1104 return false; | |
1105 HeapRegionRemSet* hrrs = r->rem_set(); | |
1106 if (hrrs != NULL) hrrs->clear(); | |
1107 // You might think here that we could clear just the cards | |
1108 // corresponding to the used region. But no: if we leave a dirty card | |
1109 // in a region we might allocate into, then it would prevent that card | |
1110 // from being enqueued, and cause it to be missed. | |
1111 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
1112 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
1113 return false; | |
1114 } | |
1115 }; | |
1116 | |
1117 | |
1118 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
1119 ModRefBarrierSet* _mr_bs; | |
1120 public: | |
1121 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
1122 bool doHeapRegion(HeapRegion* r) { | |
1123 if (r->continuesHumongous()) return false; | |
1124 if (r->used_region().word_size() != 0) { | |
1125 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
1126 } | |
1127 return false; | |
1128 } | |
1129 }; | |
1130 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1131 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1132 G1CollectedHeap* _g1h; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1133 UpdateRSOopClosure _cl; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1134 int _worker_i; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1135 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1136 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
1861 | 1137 _cl(g1->g1_rem_set(), worker_i), |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1138 _worker_i(worker_i), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1139 _g1h(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1140 { } |
1960
878b57474103
6978187: G1: assert(ParallelGCThreads> 1 || n_yielded() == _hrrs->occupied()) strikes again
johnc
parents:
1883
diff
changeset
|
1141 |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1142 bool doHeapRegion(HeapRegion* r) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1143 if (!r->continuesHumongous()) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1144 _cl.set_from(r); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1145 r->oop_iterate(&_cl); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1146 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1147 return false; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1148 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1149 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1150 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1151 class ParRebuildRSTask: public AbstractGangTask { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1152 G1CollectedHeap* _g1; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1153 public: |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1154 ParRebuildRSTask(G1CollectedHeap* g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1155 : AbstractGangTask("ParRebuildRSTask"), |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1156 _g1(g1) |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1157 { } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1158 |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1159 void work(int i) { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1160 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1161 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1162 HeapRegion::RebuildRSClaimValue); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1163 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1164 }; |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1165 |
1973 | 1166 bool G1CollectedHeap::do_collection(bool explicit_gc, |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1167 bool clear_all_soft_refs, |
342 | 1168 size_t word_size) { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1169 if (GC_locker::check_active_before_gc()) { |
1973 | 1170 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1171 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
1172 |
342 | 1173 ResourceMark rm; |
1174 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1175 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1176 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1177 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1178 |
342 | 1179 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
1180 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
1181 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1182 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1183 collector_policy()->should_clear_all_soft_refs(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1184 |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1185 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1186 |
342 | 1187 { |
1188 IsGCActiveMark x; | |
1189 | |
1190 // Timing | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1191 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1192 assert(!system_gc || explicit_gc, "invariant"); |
342 | 1193 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
1194 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1195 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1196 PrintGC, true, gclog_or_tty); |
342 | 1197 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1198 TraceMemoryManagerStats tms(true /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1199 |
342 | 1200 double start = os::elapsedTime(); |
1201 g1_policy()->record_full_collection_start(); | |
1202 | |
1203 gc_prologue(true); | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1204 increment_total_collections(true /* full gc */); |
342 | 1205 |
1206 size_t g1h_prev_used = used(); | |
1207 assert(used() == recalculate_used(), "Should be equal"); | |
1208 | |
1209 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
1210 HandleMark hm; // Discard invalid handles created during verification | |
1211 prepare_for_verify(); | |
1212 gclog_or_tty->print(" VerifyBeforeGC:"); | |
1213 Universe::verify(true); | |
1214 } | |
1215 assert(regions_accounted_for(), "Region leakage!"); | |
1216 | |
1217 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
1218 | |
1219 // We want to discover references, but not process them yet. | |
1220 // This mode is disabled in | |
1221 // instanceRefKlass::process_discovered_references if the | |
1222 // generation does some collection work, or | |
1223 // instanceRefKlass::enqueue_discovered_references if the | |
1224 // generation returns without doing any work. | |
1225 ref_processor()->disable_discovery(); | |
1226 ref_processor()->abandon_partial_discovery(); | |
1227 ref_processor()->verify_no_references_recorded(); | |
1228 | |
1229 // Abandon current iterations of concurrent marking and concurrent | |
1230 // refinement, if any are in progress. | |
1231 concurrent_mark()->abort(); | |
1232 | |
1233 // Make sure we'll choose a new allocation region afterwards. | |
1234 abandon_cur_alloc_region(); | |
636 | 1235 abandon_gc_alloc_regions(); |
342 | 1236 assert(_cur_alloc_region == NULL, "Invariant."); |
1861 | 1237 g1_rem_set()->cleanupHRRS(); |
342 | 1238 tear_down_region_lists(); |
1239 set_used_regions_to_need_zero_fill(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1240 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1241 // We may have added regions to the current incremental collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1242 // set between the last GC or pause and now. We need to clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1243 // incremental collection set and then start rebuilding it afresh |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1244 // after this full GC. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1245 abandon_collection_set(g1_policy()->inc_cset_head()); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1246 g1_policy()->clear_incremental_cset(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1247 g1_policy()->stop_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1248 |
342 | 1249 if (g1_policy()->in_young_gc_mode()) { |
1250 empty_young_list(); | |
1251 g1_policy()->set_full_young_gcs(true); | |
1252 } | |
1253 | |
1254 // Temporarily make reference _discovery_ single threaded (non-MT). | |
1255 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); | |
1256 | |
1257 // Temporarily make refs discovery atomic | |
1258 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
1259 | |
1260 // Temporarily clear _is_alive_non_header | |
1261 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
1262 | |
1263 ref_processor()->enable_discovery(); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1264 ref_processor()->setup_policy(do_clear_all_soft_refs); |
342 | 1265 |
1266 // Do collection work | |
1267 { | |
1268 HandleMark hm; // Discard invalid handles created during gc | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1360
diff
changeset
|
1269 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); |
342 | 1270 } |
1271 // Because freeing humongous regions may have added some unclean | |
1272 // regions, it is necessary to tear down again before rebuilding. | |
1273 tear_down_region_lists(); | |
1274 rebuild_region_lists(); | |
1275 | |
1276 _summary_bytes_used = recalculate_used(); | |
1277 | |
1278 ref_processor()->enqueue_discovered_references(); | |
1279 | |
1280 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
1281 | |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1282 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
1283 |
342 | 1284 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
1285 HandleMark hm; // Discard invalid handles created during verification | |
1286 gclog_or_tty->print(" VerifyAfterGC:"); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
1287 prepare_for_verify(); |
342 | 1288 Universe::verify(false); |
1289 } | |
1290 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
1291 | |
1292 reset_gc_time_stamp(); | |
1293 // Since everything potentially moved, we will clear all remembered | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1294 // sets, and clear all cards. Later we will rebuild remebered |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1295 // sets. We will also reset the GC time stamps of the regions. |
342 | 1296 PostMCRemSetClearClosure rs_clear(mr_bs()); |
1297 heap_region_iterate(&rs_clear); | |
1298 | |
1299 // Resize the heap if necessary. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1300 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); |
342 | 1301 |
1302 if (_cg1r->use_cache()) { | |
1303 _cg1r->clear_and_record_card_counts(); | |
1304 _cg1r->clear_hot_cache(); | |
1305 } | |
1306 | |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1307 // Rebuild remembered sets of all regions. |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1308 |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
1309 if (G1CollectedHeap::use_parallel_gc_threads()) { |
626
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1310 ParRebuildRSTask rebuild_rs_task(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1311 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1312 HeapRegion::InitialClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1313 set_par_threads(workers()->total_workers()); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1314 workers()->run_task(&rebuild_rs_task); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1315 set_par_threads(0); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1316 assert(check_heap_region_claim_values( |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1317 HeapRegion::RebuildRSClaimValue), "sanity check"); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1318 reset_heap_region_claim_values(); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1319 } else { |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1320 RebuildRSOutOfRegionClosure rebuild_rs(this); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1321 heap_region_iterate(&rebuild_rs); |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1322 } |
87fa6e083d82
6760309: G1: update remembered sets during Full GCs
apetrusenko
parents:
620
diff
changeset
|
1323 |
342 | 1324 if (PrintGC) { |
1325 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
1326 } | |
1327 | |
1328 if (true) { // FIXME | |
1329 // Ask the permanent generation to adjust size for full collections | |
1330 perm()->compute_new_size(); | |
1331 } | |
1332 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1333 // Start a new incremental collection set for the next pause |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1334 assert(g1_policy()->collection_set() == NULL, "must be"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1335 g1_policy()->start_incremental_cset_building(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1336 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1337 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1338 // regions to the incremental collection set for the next |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1339 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1340 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1341 |
342 | 1342 double end = os::elapsedTime(); |
1343 g1_policy()->record_full_collection_end(); | |
1344 | |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1345 #ifdef TRACESPINNING |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1346 ParallelTaskTerminator::print_termination_counts(); |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1347 #endif |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
1348 |
342 | 1349 gc_epilogue(true); |
1350 | |
794 | 1351 // Discard all rset updates |
1352 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1353 assert(!G1DeferredRSUpdate |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1354 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
342 | 1355 assert(regions_accounted_for(), "Region leakage!"); |
1356 } | |
1357 | |
1358 if (g1_policy()->in_young_gc_mode()) { | |
1359 _young_list->reset_sampled_info(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1360 // At this point there should be no regions in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1361 // entire heap tagged as young. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1362 assert( check_young_list_empty(true /* check_heap */), |
342 | 1363 "young list should be empty at this point"); |
1364 } | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1365 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1366 // Update the number of full collections that have been completed. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1367 increment_full_collections_completed(false /* outer */); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
1368 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1369 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1370 Universe::print_heap_after_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
1371 } |
1973 | 1372 |
1373 return true; | |
342 | 1374 } |
1375 | |
1376 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1973 | 1377 // do_collection() will return whether it succeeded in performing |
1378 // the GC. Currently, there is no facility on the | |
1379 // do_full_collection() API to notify the caller than the collection | |
1380 // did not succeed (e.g., because it was locked out by the GC | |
1381 // locker). So, right now, we'll ignore the return value. | |
1382 bool dummy = do_collection(true, /* explicit_gc */ | |
1383 clear_all_soft_refs, | |
1384 0 /* word_size */); | |
342 | 1385 } |
1386 | |
1387 // This code is mostly copied from TenuredGeneration. | |
1388 void | |
1389 G1CollectedHeap:: | |
1390 resize_if_necessary_after_full_collection(size_t word_size) { | |
1391 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1392 | |
1393 // Include the current allocation, if any, and bytes that will be | |
1394 // pre-allocated to support collections, as "used". | |
1395 const size_t used_after_gc = used(); | |
1396 const size_t capacity_after_gc = capacity(); | |
1397 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1398 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1399 // This is enforced in arguments.cpp. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1400 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1401 "otherwise the code below doesn't make sense"); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1402 |
342 | 1403 // We don't have floating point command-line arguments |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1404 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; |
342 | 1405 const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1406 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; |
342 | 1407 const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
1408 | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1409 const size_t min_heap_size = collector_policy()->min_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1410 const size_t max_heap_size = collector_policy()->max_heap_byte_size(); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1411 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1412 // We have to be careful here as these two calculations can overflow |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1413 // 32-bit size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1414 double used_after_gc_d = (double) used_after_gc; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1415 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1416 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1417 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1418 // Let's make sure that they are both under the max heap size, which |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1419 // by default will make them fit into a size_t. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1420 double desired_capacity_upper_bound = (double) max_heap_size; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1421 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1422 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1423 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1424 desired_capacity_upper_bound); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1425 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1426 // We can now safely turn them into size_t's. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1427 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1428 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1429 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1430 // This assert only makes sense here, before we adjust them |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1431 // with respect to the min and max heap size. |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1432 assert(minimum_desired_capacity <= maximum_desired_capacity, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1433 err_msg("minimum_desired_capacity = "SIZE_FORMAT", " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1434 "maximum_desired_capacity = "SIZE_FORMAT, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1435 minimum_desired_capacity, maximum_desired_capacity)); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1436 |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1437 // Should not be greater than the heap max size. No need to adjust |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1438 // it with respect to the heap min size as it's a lower bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1439 // we'll try to make the capacity larger than it, not smaller). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1440 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1441 // Should not be less than the heap min size. No need to adjust it |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1442 // with respect to the heap max size as it's an upper bound (i.e., |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1443 // we'll try to make the capacity smaller than it, not greater). |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1444 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); |
342 | 1445 |
1446 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1447 const double free_percentage = |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1448 (double) free_after_gc / (double) capacity_after_gc; |
342 | 1449 gclog_or_tty->print_cr("Computing new size after full GC "); |
1450 gclog_or_tty->print_cr(" " | |
1451 " minimum_free_percentage: %6.2f", | |
1452 minimum_free_percentage); | |
1453 gclog_or_tty->print_cr(" " | |
1454 " maximum_free_percentage: %6.2f", | |
1455 maximum_free_percentage); | |
1456 gclog_or_tty->print_cr(" " | |
1457 " capacity: %6.1fK" | |
1458 " minimum_desired_capacity: %6.1fK" | |
1459 " maximum_desired_capacity: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1460 (double) capacity_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1461 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1462 (double) maximum_desired_capacity / (double) K); |
342 | 1463 gclog_or_tty->print_cr(" " |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1464 " free_after_gc: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1465 " used_after_gc: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1466 (double) free_after_gc / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1467 (double) used_after_gc / (double) K); |
342 | 1468 gclog_or_tty->print_cr(" " |
1469 " free_percentage: %6.2f", | |
1470 free_percentage); | |
1471 } | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1472 if (capacity_after_gc < minimum_desired_capacity) { |
342 | 1473 // Don't expand unless it's significant |
1474 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
1475 expand(expand_bytes); | |
1476 if (PrintGC && Verbose) { | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1477 gclog_or_tty->print_cr(" " |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1478 " expanding:" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1479 " max_heap_size: %6.1fK" |
342 | 1480 " minimum_desired_capacity: %6.1fK" |
1481 " expand_bytes: %6.1fK", | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1482 (double) max_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1483 (double) minimum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1484 (double) expand_bytes / (double) K); |
342 | 1485 } |
1486 | |
1487 // No expansion, now see if we want to shrink | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1488 } else if (capacity_after_gc > maximum_desired_capacity) { |
342 | 1489 // Capacity too large, compute shrinking size |
1490 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1491 shrink(shrink_bytes); | |
1492 if (PrintGC && Verbose) { | |
1493 gclog_or_tty->print_cr(" " | |
1494 " shrinking:" | |
1717
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1495 " min_heap_size: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1496 " maximum_desired_capacity: %6.1fK" |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1497 " shrink_bytes: %6.1fK", |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1498 (double) min_heap_size / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1499 (double) maximum_desired_capacity / (double) K, |
688c3755d7af
6959014: G1: assert(minimum_desired_capacity <= maximum_desired_capacity) failed: sanity check
tonyp
parents:
1709
diff
changeset
|
1500 (double) shrink_bytes / (double) K); |
342 | 1501 } |
1502 } | |
1503 } | |
1504 | |
1505 | |
// Last-ditch allocation path.  Called at a safepoint, by the VM thread,
// after an allocation has failed.  It escalates progressively:
//   1. retry the allocation as-is;
//   2. expand the heap and retry;
//   3. do a full GC (keeping soft references) and retry;
//   4. do a full GC that clears all soft references and retry.
// *succeeded is set to false only when a full GC itself could not be
// performed; a NULL return with *succeeded == true means the heap is
// genuinely out of space for this request.
HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
                                           bool* succeeded) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "satisfy_failed_allocation() should only be called at a safepoint");
  assert(Thread::current()->is_VM_thread(),
         "satisfy_failed_allocation() should only be called by the VM thread");

  *succeeded = true;
  // Let's attempt the allocation first.
  // (expect_null_cur_alloc_region is false: the current alloc region
  // may still be set at this point.)
  HeapWord* result = attempt_allocation_at_safepoint(word_size,
                                     false /* expect_null_cur_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses.  Therefore, at least for now, we'll favor
  // expansion over collection.  (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)
  result = expand_and_allocate(word_size);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // Expansion didn't work, we'll try to do a Full GC.
  bool gc_succeeded = do_collection(false, /* explicit_gc */
                                    false, /* clear_all_soft_refs */
                                    word_size);
  if (!gc_succeeded) {
    // The GC could not be performed; report failure to the caller.
    *succeeded = false;
    return NULL;
  }

  // Retry the allocation
  result = attempt_allocation_at_safepoint(word_size,
                                      true /* expect_null_cur_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // Then, try a Full GC that will collect all soft references.
  gc_succeeded = do_collection(false, /* explicit_gc */
                               true, /* clear_all_soft_refs */
                               word_size);
  if (!gc_succeeded) {
    *succeeded = false;
    return NULL;
  }

  // Retry the allocation once more
  result = attempt_allocation_at_safepoint(word_size,
                                      true /* expect_null_cur_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // Having just done a soft-ref-clearing collection, the policy flag
  // must already have been consumed.
  assert(!collector_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  assert(*succeeded, "sanity");
  return NULL;
}
1577 | |
1578 // Attempting to expand the heap sufficiently | |
1579 // to support an allocation of the given "word_size". If | |
1580 // successful, perform the allocation and return the address of the | |
1581 // allocated block, or else "NULL". | |
1582 | |
1583 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
1973 | 1584 assert(SafepointSynchronize::is_at_safepoint(), |
1585 "expand_and_allocate() should only be called at a safepoint"); | |
1586 assert(Thread::current()->is_VM_thread(), | |
1587 "expand_and_allocate() should only be called by the VM thread"); | |
1588 | |
342 | 1589 size_t expand_bytes = word_size * HeapWordSize; |
1590 if (expand_bytes < MinHeapDeltaBytes) { | |
1591 expand_bytes = MinHeapDeltaBytes; | |
1592 } | |
1593 expand(expand_bytes); | |
1594 assert(regions_accounted_for(), "Region leakage!"); | |
1973 | 1595 |
1596 return attempt_allocation_at_safepoint(word_size, | |
1597 true /* expect_null_cur_alloc_region */); | |
342 | 1598 } |
1599 | |
1600 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { | |
1601 size_t pre_used = 0; | |
1602 size_t cleared_h_regions = 0; | |
1603 size_t freed_regions = 0; | |
1604 UncleanRegionList local_list; | |
1605 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, | |
1606 freed_regions, &local_list); | |
1607 | |
1608 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
1609 &local_list); | |
1610 return pre_used; | |
1611 } | |
1612 | |
1613 void | |
1614 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, | |
1615 size_t& pre_used, | |
1616 size_t& cleared_h, | |
1617 size_t& freed_regions, | |
1618 UncleanRegionList* list, | |
1619 bool par) { | |
1620 assert(!hr->continuesHumongous(), "should have filtered these out"); | |
1621 size_t res = 0; | |
677 | 1622 if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && |
1623 !hr->is_young()) { | |
1624 if (G1PolicyVerbose > 0) | |
1625 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" | |
1626 " during cleanup", hr, hr->used()); | |
1627 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); | |
342 | 1628 } |
1629 } | |
1630 | |
// FIXME: both this and shrink could probably be more efficient by
// doing one "VirtualSpace::expand_by" call rather than several.
//
// Grow the committed part of the heap by (at least) "expand_bytes",
// one region at a time: commit a region's worth of storage, wrap it
// in a new HeapRegion, insert it into the HeapRegionSeq, and place it
// on either the free list or the unclean (needs-zeroing) list.
void G1CollectedHeap::expand(size_t expand_bytes) {
  size_t old_mem_size = _g1_storage.committed_size();
  // We expand by a minimum of 1K.
  expand_bytes = MAX2(expand_bytes, (size_t)K);
  // Round the request up to page and then region granularity.
  size_t aligned_expand_bytes =
    ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                       HeapRegion::GrainBytes);
  expand_bytes = aligned_expand_bytes;
  while (expand_bytes > 0) {
    HeapWord* base = (HeapWord*)_g1_storage.high();
    // Commit more storage.
    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
    if (!successful) {
      // Commit failed; give up on the remainder of the request.
      expand_bytes = 0;
    } else {
      expand_bytes -= HeapRegion::GrainBytes;
      // Expand the committed region.
      HeapWord* high = (HeapWord*) _g1_storage.high();
      _g1_committed.set_end(high);
      // Create a new HeapRegion.
      MemRegion mr(base, high);
      // Memory that has never been committed before is already zeroed.
      bool is_zeroed = !_g1_max_committed.contains(base);
      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);

      // Now update max_committed if necessary.
      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));

      // Add it to the HeapRegionSeq.
      _hrs->insert(hr);
      // Set the zero-fill state, according to whether it's already
      // zeroed.
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        if (is_zeroed) {
          hr->set_zero_fill_complete();
          put_free_region_on_list_locked(hr);
        } else {
          hr->set_zero_fill_needed();
          put_region_on_unclean_list_locked(hr);
        }
      }
      _free_regions++;
      // And we used up an expansion region to create it.
      _expansion_regions--;
      // Tell the cardtable about it.
      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
      // And the offset table as well.
      _bot_shared->resize(_g1_committed.word_size());
    }
  }
  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_expand_bytes/K,
                           new_mem_size/K);
  }
}
1691 | |
// Uncommit up to "shrink_bytes" (rounded down to page and then region
// granularity) from the high end of G1's storage, and update the
// committed-region bookkeeping, card table, block-offset table and
// remembered-set structures accordingly.
void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
{
  size_t old_mem_size = _g1_storage.committed_size();
  size_t aligned_shrink_bytes =
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                         HeapRegion::GrainBytes);
  size_t num_regions_deleted = 0;
  // Ask the region sequence to drop regions from its high end; "mr"
  // is the memory range actually given back.
  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);

  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  if (mr.byte_size() > 0)
    _g1_storage.shrink_by(mr.byte_size());
  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");

  _g1_committed.set_end(mr.start());
  _free_regions -= num_regions_deleted;
  // Regions given back become available for future expansion.
  _expansion_regions += num_regions_deleted;

  // Tell the cardtable about it.
  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);

  // And the offset table as well.
  _bot_shared->resize(_g1_committed.word_size());

  HeapRegionRemSet::shrink_heap(n_regions());

  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_shrink_bytes/K,
                           new_mem_size/K);
  }
}
1726 | |
// Public shrink entry point: release the GC alloc regions and tear
// down the region lists before uncommitting storage, then rebuild the
// lists afterwards.
void G1CollectedHeap::shrink(size_t shrink_bytes) {
  release_gc_alloc_regions(true /* totally */);
  tear_down_region_lists();  // We will rebuild them in a moment.
  shrink_helper(shrink_bytes);
  rebuild_region_lists();
}
1733 | |
1734 // Public methods. | |
1735 | |
1736 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1737 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1738 #endif // _MSC_VER | |
1739 | |
1740 | |
// Constructor.  Sets every field to its pre-initialization default
// and allocates the structures sized by the number of GC worker
// threads (task queues and remembered-set iterators).  The heavy
// lifting — reserving and committing the heap itself — is deferred
// to initialize().
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  SharedHeap(policy_),
  _g1_policy(policy_),
  _dirty_card_queue_set(false),
  _into_cset_dirty_card_queue_set(false),
  _ref_processor(NULL),
  _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  _bot_shared(NULL),
  _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
  _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  _evac_failure_scan_stack(NULL) ,
  _mark_in_progress(false),
  _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
  _cur_alloc_region(NULL),
  _refine_cte_cl(NULL),
  _free_region_list(NULL), _free_region_list_size(0),
  _free_regions(0),
  _full_collection(false),
  _unclean_region_list(),
  _unclean_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
  _surviving_young_words(NULL),
  _full_collections_completed(0),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _dirty_cards_region_list(NULL) {
  _g1h = this; // To catch bugs.
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }

  // Objects of at least half a region are treated as humongous.
  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;

  // At least one queue, even when running with ParallelGCThreads == 0.
  int n_queues = MAX2((int)ParallelGCThreads, 1);
  _task_queues = new RefToScanQueueSet(n_queues);

  int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  // One remembered-set iterator per worker queue.
  HeapRegionRemSetIterator** iter_arr =
    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  for (int i = 0; i < n_queues; i++) {
    iter_arr[i] = new HeapRegionRemSetIterator();
  }
  _rem_set_iterator = iter_arr;

  for (int i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
    _task_queues->register_queue(i, q);
  }

  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    _gc_alloc_regions[ap] = NULL;
    _gc_alloc_region_counts[ap] = 0;
    _retained_gc_alloc_regions[ap] = NULL;
    // by default, we do not retain a GC alloc region for each ap;
    // we'll override this, when appropriate, below
    _retain_gc_alloc_region[ap] = false;
  }

  // We will try to remember the last half-full tenured region we
  // allocated to at the end of a collection so that we can re-use it
  // during the next collection.
  _retain_gc_alloc_region[GCAllocForTenured] = true;

  guarantee(_task_queues != NULL, "task_queues allocation failure.");
}
1810 | |
1811 jint G1CollectedHeap::initialize() { | |
1166 | 1812 CollectedHeap::pre_initialize(); |
342 | 1813 os::enable_vtime(); |
1814 | |
1815 // Necessary to satisfy locking discipline assertions. | |
1816 | |
1817 MutexLocker x(Heap_lock); | |
1818 | |
1819 // While there are no constraints in the GC code that HeapWordSize | |
1820 // be any particular value, there are multiple other areas in the | |
1821 // system which believe this to be true (e.g. oop->object_size in some | |
1822 // cases incorrectly returns the size in wordSize units rather than | |
1823 // HeapWordSize). | |
1824 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1825 | |
1826 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1827 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1828 | |
1829 // Ensure that the sizes are properly aligned. | |
1830 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1831 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1832 | |
1833 _cg1r = new ConcurrentG1Refine(); | |
1834 | |
1835 // Reserve the maximum. | |
1836 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1837 // Includes the perm-gen. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1838 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1839 const size_t total_reserved = max_byte_size + pgs->max_size(); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1840 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1841 |
342 | 1842 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1843 HeapRegion::GrainBytes, | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1844 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1845 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1846 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1847 if (addr != NULL && !heap_rs.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1848 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1849 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1850 // Try again to reserver heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1851 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1852 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1853 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1854 if (addr != NULL && !heap_rs0.is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1855 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1856 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1857 assert(addr == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1858 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1859 false /*ism*/, addr); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1860 heap_rs = heap_rs1; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1861 } else { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1862 heap_rs = heap_rs0; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1863 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1864 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
620
diff
changeset
|
1865 } |
342 | 1866 |
1867 if (!heap_rs.is_reserved()) { | |
1868 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1869 return JNI_ENOMEM; | |
1870 } | |
1871 | |
1872 // It is important to do this in a way such that concurrent readers can't | |
1873 // temporarily think somethings in the heap. (I've actually seen this | |
1874 // happen in asserts: DLD.) | |
1875 _reserved.set_word_size(0); | |
1876 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1877 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1878 | |
1879 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1880 | |
1881 _num_humongous_regions = 0; | |
1882 | |
1883 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1884 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1885 set_barrier_set(rem_set()->bs()); | |
1886 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1887 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1888 } else { | |
1889 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1890 return JNI_ENOMEM; | |
1891 } | |
1892 | |
1893 // Also create a G1 rem set. | |
1861 | 1894 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
1895 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
342 | 1896 } else { |
1861 | 1897 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
1898 return JNI_ENOMEM; | |
342 | 1899 } |
1900 | |
1901 // Carve out the G1 part of the heap. | |
1902 | |
1903 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1904 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1905 g1_rs.size()/HeapWordSize); | |
1906 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1907 | |
1908 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1909 | |
1910 _g1_storage.initialize(g1_rs, 0); | |
1911 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1912 _g1_max_committed = _g1_committed; | |
393 | 1913 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1914 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1915 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
1916 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1917 // 6843694 - ensure that the maximum region index can fit |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1918 // in the remembered set structures. |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1919 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1920 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1921 |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1922 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1923 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1924 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
941
diff
changeset
|
1925 "too many cards per region"); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
796
diff
changeset
|
1926 |
342 | 1927 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1928 heap_word_size(init_byte_size)); | |
1929 | |
1930 _g1h = this; | |
1931 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1932 _in_cset_fast_test_length = max_regions(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1933 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1934 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1935 // We're biasing _in_cset_fast_test to avoid subtracting the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1936 // beginning of the heap every time we want to index; basically |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1937 // it's the same with what we do with the card table. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1938 _in_cset_fast_test = _in_cset_fast_test_base - |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1939 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1940 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1941 // Clear the _cset_fast_test bitmap in anticipation of adding |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1942 // regions to the incremental collection set for the first |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1943 // evacuation pause. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1944 clear_cset_fast_test(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
1945 |
342 | 1946 // Create the ConcurrentMark data structure and thread. |
1947 // (Must do this late, so that "max_regions" is defined.) | |
1948 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1949 _cmThread = _cm->cmThread(); | |
1950 | |
1951 // ...and the concurrent zero-fill thread, if necessary. | |
1952 if (G1ConcZeroFill) { | |
1953 _czft = new ConcurrentZFThread(); | |
1954 } | |
1955 | |
1956 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1957 HeapRegionRemSet::init_heap(max_regions()); | |
1958 | |
677 | 1959 // Now expand into the initial heap size. |
1960 expand(init_byte_size); | |
342 | 1961 |
1962 // Perform any initialization actions delegated to the policy. | |
1963 g1_policy()->init(); | |
1964 | |
1965 g1_policy()->note_start_of_mark_thread(); | |
1966 | |
1967 _refine_cte_cl = | |
1968 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1969 g1_rem_set(), | |
1970 concurrent_g1_refine()); | |
1971 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1972 | |
1973 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1974 SATB_Q_FL_lock, | |
1111 | 1975 G1SATBProcessCompletedThreshold, |
342 | 1976 Shared_SATB_Q_lock); |
794 | 1977 |
1978 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1979 DirtyCardQ_FL_lock, | |
1111 | 1980 concurrent_g1_refine()->yellow_zone(), |
1981 concurrent_g1_refine()->red_zone(), | |
794 | 1982 Shared_DirtyCardQ_lock); |
1983 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1984 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1985 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1986 DirtyCardQ_FL_lock, |
1111 | 1987 -1, // never trigger processing |
1988 -1, // no limit on length | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1989 Shared_DirtyCardQ_lock, |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1990 &JavaThread::dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
1991 } |
1705 | 1992 |
1993 // Initialize the card queue set used to hold cards containing | |
1994 // references into the collection set. | |
1995 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, | |
1996 DirtyCardQ_FL_lock, | |
1997 -1, // never trigger processing | |
1998 -1, // no limit on length | |
1999 Shared_DirtyCardQ_lock, | |
2000 &JavaThread::dirty_card_queue_set()); | |
2001 | |
342 | 2002 // In case we're keeping closure specialization stats, initialize those |
2003 // counts and that mechanism. | |
2004 SpecializationStats::clear(); | |
2005 | |
2006 _gc_alloc_region_list = NULL; | |
2007 | |
2008 // Do later initialization work for concurrent refinement. | |
2009 _cg1r->init(); | |
2010 | |
2011 return JNI_OK; | |
2012 } | |
2013 | |
2014 void G1CollectedHeap::ref_processing_init() { | |
2015 SharedHeap::ref_processing_init(); | |
2016 MemRegion mr = reserved_region(); | |
2017 _ref_processor = ReferenceProcessor::create_ref_processor( | |
2018 mr, // span | |
2019 false, // Reference discovery is not atomic | |
2020 // (though it shouldn't matter here.) | |
2021 true, // mt_discovery | |
2022 NULL, // is alive closure: need to fill this in for efficiency | |
2023 ParallelGCThreads, | |
2024 ParallelRefProcEnabled, | |
2025 true); // Setting next fields of discovered | |
2026 // lists requires a barrier. | |
2027 } | |
2028 | |
2029 size_t G1CollectedHeap::capacity() const { | |
2030 return _g1_committed.byte_size(); | |
2031 } | |
2032 | |
1705 | 2033 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, |
2034 DirtyCardQueue* into_cset_dcq, | |
2035 bool concurrent, | |
342 | 2036 int worker_i) { |
889 | 2037 // Clean cards in the hot card cache |
1705 | 2038 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); |
889 | 2039 |
342 | 2040 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
2041 int n_completed_buffers = 0; | |
1705 | 2042 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { |
342 | 2043 n_completed_buffers++; |
2044 } | |
2045 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
2046 (double) n_completed_buffers); | |
2047 dcqs.clear_n_completed_buffers(); | |
2048 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
2049 } | |
2050 | |
2051 | |
2052 // Computes the sum of the storage used by the various regions. | |
2053 | |
2054 size_t G1CollectedHeap::used() const { | |
862
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2055 assert(Heap_lock->owner() != NULL, |
36b5611220a7
6863216: Clean up debugging debris inadvertently pushed with 6700789
ysr
parents:
861
diff
changeset
|
2056 "Should be owned on this thread's behalf."); |
342 | 2057 size_t result = _summary_bytes_used; |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2058 // Read only once in case it is set to NULL concurrently |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2059 HeapRegion* hr = _cur_alloc_region; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2060 if (hr != NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2061 result += hr->used(); |
342 | 2062 return result; |
2063 } | |
2064 | |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2065 size_t G1CollectedHeap::used_unlocked() const { |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2066 size_t result = _summary_bytes_used; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2067 return result; |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2068 } |
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2069 |
342 | 2070 class SumUsedClosure: public HeapRegionClosure { |
2071 size_t _used; | |
2072 public: | |
2073 SumUsedClosure() : _used(0) {} | |
2074 bool doHeapRegion(HeapRegion* r) { | |
2075 if (!r->continuesHumongous()) { | |
2076 _used += r->used(); | |
2077 } | |
2078 return false; | |
2079 } | |
2080 size_t result() { return _used; } | |
2081 }; | |
2082 | |
2083 size_t G1CollectedHeap::recalculate_used() const { | |
2084 SumUsedClosure blk; | |
2085 _hrs->iterate(&blk); | |
2086 return blk.result(); | |
2087 } | |
2088 | |
2089 #ifndef PRODUCT | |
2090 class SumUsedRegionsClosure: public HeapRegionClosure { | |
2091 size_t _num; | |
2092 public: | |
677 | 2093 SumUsedRegionsClosure() : _num(0) {} |
342 | 2094 bool doHeapRegion(HeapRegion* r) { |
2095 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
2096 _num += 1; | |
2097 } | |
2098 return false; | |
2099 } | |
2100 size_t result() { return _num; } | |
2101 }; | |
2102 | |
2103 size_t G1CollectedHeap::recalculate_used_regions() const { | |
2104 SumUsedRegionsClosure blk; | |
2105 _hrs->iterate(&blk); | |
2106 return blk.result(); | |
2107 } | |
2108 #endif // PRODUCT | |
2109 | |
2110 size_t G1CollectedHeap::unsafe_max_alloc() { | |
2111 if (_free_regions > 0) return HeapRegion::GrainBytes; | |
2112 // otherwise, is there space in the current allocation region? | |
2113 | |
2114 // We need to store the current allocation region in a local variable | |
2115 // here. The problem is that this method doesn't take any locks and | |
2116 // there may be other threads which overwrite the current allocation | |
2117 // region field. attempt_allocation(), for example, sets it to NULL | |
2118 // and this can happen *after* the NULL check here but before the call | |
2119 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
2120 // to be a problem in the optimized build, since the two loads of the | |
2121 // current allocation region field are optimized away. | |
2122 HeapRegion* car = _cur_alloc_region; | |
2123 | |
2124 // FIXME: should iterate over all regions? | |
2125 if (car == NULL) { | |
2126 return 0; | |
2127 } | |
2128 return car->free(); | |
2129 } | |
2130 | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2131 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2132 return |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2133 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2134 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2135 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2136 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2137 void G1CollectedHeap::increment_full_collections_completed(bool outer) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2138 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2139 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2140 // We have already incremented _total_full_collections at the start |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2141 // of the GC, so total_full_collections() represents how many full |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2142 // collections have been started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2143 unsigned int full_collections_started = total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2144 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2145 // Given that this method is called at the end of a Full GC or of a |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2146 // concurrent cycle, and those can be nested (i.e., a Full GC can |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2147 // interrupt a concurrent cycle), the number of full collections |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2148 // completed should be either one (in the case where there was no |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2149 // nesting) or two (when a Full GC interrupted a concurrent cycle) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2150 // behind the number of full collections started. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2151 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2152 // This is the case for the inner caller, i.e. a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2153 assert(outer || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2154 (full_collections_started == _full_collections_completed + 1) || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2155 (full_collections_started == _full_collections_completed + 2), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2156 err_msg("for inner caller: full_collections_started = %u " |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2157 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2158 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2159 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2160 // This is the case for the outer caller, i.e. the concurrent cycle. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2161 assert(!outer || |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2162 (full_collections_started == _full_collections_completed + 1), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2163 err_msg("for outer caller: full_collections_started = %u " |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2164 "is inconsistent with _full_collections_completed = %u", |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2165 full_collections_started, _full_collections_completed)); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2166 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2167 _full_collections_completed += 1; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2168 |
1840
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2169 // We need to clear the "in_progress" flag in the CM thread before |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2170 // we wake up any waiters (especially when ExplicitInvokesConcurrent |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2171 // is set) so that if a waiter requests another System.gc() it doesn't |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2172 // incorrectly see that a marking cyle is still in progress. |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2173 if (outer) { |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2174 _cmThread->clear_in_progress(); |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2175 } |
4e0094bc41fa
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
johnc
parents:
1833
diff
changeset
|
2176 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2177 // This notify_all() will ensure that a thread that called |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2178 // System.gc() with (with ExplicitGCInvokesConcurrent set or not) |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2179 // and it's waiting for a full GC to finish will be woken up. It is |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2180 // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2181 FullGCCount_lock->notify_all(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2182 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2183 |
342 | 2184 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
2185 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
2186 assert(Heap_lock->is_locked(), "Precondition#2"); | |
2187 GCCauseSetter gcs(this, cause); | |
2188 switch (cause) { | |
2189 case GCCause::_heap_inspection: | |
2190 case GCCause::_heap_dump: { | |
2191 HandleMark hm; | |
2192 do_full_collection(false); // don't clear all soft refs | |
2193 break; | |
2194 } | |
2195 default: // XXX FIX ME | |
2196 ShouldNotReachHere(); // Unexpected use of this function | |
2197 } | |
2198 } | |
2199 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2200 void G1CollectedHeap::collect(GCCause::Cause cause) { |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2201 // The caller doesn't have the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2202 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2203 |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2204 unsigned int gc_count_before; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2205 unsigned int full_gc_count_before; |
342 | 2206 { |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2207 MutexLocker ml(Heap_lock); |
1973 | 2208 |
2209 // Don't want to do a GC until cleanup is completed. This | |
2210 // limitation will be removed in the near future when the | |
2211 // operation of the free region list is revamped as part of | |
2212 // CR 6977804. | |
2213 wait_for_cleanup_complete(); | |
2214 | |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2215 // Read the GC count while holding the Heap_lock |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2216 gc_count_before = SharedHeap::heap()->total_collections(); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2217 full_gc_count_before = SharedHeap::heap()->total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2218 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2219 |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2220 if (should_do_concurrent_full_gc(cause)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2221 // Schedule an initial-mark evacuation pause that will start a |
1973 | 2222 // concurrent cycle. We're setting word_size to 0 which means that |
2223 // we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2224 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2225 0, /* word_size */ |
2226 true, /* should_initiate_conc_mark */ | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2227 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2228 cause); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2229 VMThread::execute(&op); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2230 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2231 if (cause == GCCause::_gc_locker |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2232 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2233 |
1973 | 2234 // Schedule a standard evacuation pause. We're setting word_size |
2235 // to 0 which means that we are not requesting a post-GC allocation. | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2236 VM_G1IncCollectionPause op(gc_count_before, |
1973 | 2237 0, /* word_size */ |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2238 false, /* should_initiate_conc_mark */ |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2239 g1_policy()->max_pause_time_ms(), |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2240 cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2241 VMThread::execute(&op); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2242 } else { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2243 // Schedule a Full GC. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2244 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
1088
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2245 VMThread::execute(&op); |
3fc996d4edd2
6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ysr
parents:
1045
diff
changeset
|
2246 } |
342 | 2247 } |
2248 } | |
2249 | |
2250 bool G1CollectedHeap::is_in(const void* p) const { | |
2251 if (_g1_committed.contains(p)) { | |
2252 HeapRegion* hr = _hrs->addr_to_region(p); | |
2253 return hr->is_in(p); | |
2254 } else { | |
2255 return _perm_gen->as_gen()->is_in(p); | |
2256 } | |
2257 } | |
2258 | |
2259 // Iteration functions. | |
2260 | |
2261 // Iterates an OopClosure over all ref-containing fields of objects | |
2262 // within a HeapRegion. | |
2263 | |
2264 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
2265 MemRegion _mr; | |
2266 OopClosure* _cl; | |
2267 public: | |
2268 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
2269 : _mr(mr), _cl(cl) {} | |
2270 bool doHeapRegion(HeapRegion* r) { | |
2271 if (! r->continuesHumongous()) { | |
2272 r->oop_iterate(_cl); | |
2273 } | |
2274 return false; | |
2275 } | |
2276 }; | |
2277 | |
678 | 2278 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 2279 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
2280 _hrs->iterate(&blk); | |
678 | 2281 if (do_perm) { |
2282 perm_gen()->oop_iterate(cl); | |
2283 } | |
342 | 2284 } |
2285 | |
678 | 2286 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 2287 IterateOopClosureRegionClosure blk(mr, cl); |
2288 _hrs->iterate(&blk); | |
678 | 2289 if (do_perm) { |
2290 perm_gen()->oop_iterate(cl); | |
2291 } | |
342 | 2292 } |
2293 | |
2294 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
2295 | |
2296 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
2297 ObjectClosure* _cl; | |
2298 public: | |
2299 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
2300 bool doHeapRegion(HeapRegion* r) { | |
2301 if (! r->continuesHumongous()) { | |
2302 r->object_iterate(_cl); | |
2303 } | |
2304 return false; | |
2305 } | |
2306 }; | |
2307 | |
678 | 2308 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 2309 IterateObjectClosureRegionClosure blk(cl); |
2310 _hrs->iterate(&blk); | |
678 | 2311 if (do_perm) { |
2312 perm_gen()->object_iterate(cl); | |
2313 } | |
342 | 2314 } |
2315 | |
2316 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
2317 // FIXME: is this right? | |
2318 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
2319 } | |
2320 | |
2321 // Calls a SpaceClosure on a HeapRegion. | |
2322 | |
2323 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
2324 SpaceClosure* _cl; | |
2325 public: | |
2326 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
2327 bool doHeapRegion(HeapRegion* r) { | |
2328 _cl->do_space(r); | |
2329 return false; | |
2330 } | |
2331 }; | |
2332 | |
2333 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
2334 SpaceClosureRegionClosure blk(cl); | |
2335 _hrs->iterate(&blk); | |
2336 } | |
2337 | |
2338 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { | |
2339 _hrs->iterate(cl); | |
2340 } | |
2341 | |
2342 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
2343 HeapRegionClosure* cl) { | |
2344 _hrs->iterate_from(r, cl); | |
2345 } | |
2346 | |
2347 void | |
2348 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { | |
2349 _hrs->iterate_from(idx, cl); | |
2350 } | |
2351 | |
2352 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } | |
2353 | |
2354 void | |
2355 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
2356 int worker, | |
2357 jint claim_value) { | |
355 | 2358 const size_t regions = n_regions(); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2359 const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1); |
355 | 2360 // try to spread out the starting points of the workers |
2361 const size_t start_index = regions / worker_num * (size_t) worker; | |
2362 | |
2363 // each worker will actually look at all regions | |
2364 for (size_t count = 0; count < regions; ++count) { | |
2365 const size_t index = (start_index + count) % regions; | |
2366 assert(0 <= index && index < regions, "sanity"); | |
2367 HeapRegion* r = region_at(index); | |
2368 // we'll ignore "continues humongous" regions (we'll process them | |
2369 // when we come across their corresponding "start humongous" | |
2370 // region) and regions already claimed | |
2371 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
2372 continue; | |
2373 } | |
2374 // OK, try to claim it | |
342 | 2375 if (r->claimHeapRegion(claim_value)) { |
355 | 2376 // success! |
2377 assert(!r->continuesHumongous(), "sanity"); | |
2378 if (r->startsHumongous()) { | |
2379 // If the region is "starts humongous" we'll iterate over its | |
2380 // "continues humongous" first; in fact we'll do them | |
2381 // first. The order is important. In on case, calling the | |
2382 // closure on the "starts humongous" region might de-allocate | |
2383 // and clear all its "continues humongous" regions and, as a | |
2384 // result, we might end up processing them twice. So, we'll do | |
2385 // them first (notice: most closures will ignore them anyway) and | |
2386 // then we'll do the "starts humongous" region. | |
2387 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
2388 HeapRegion* chr = region_at(ch_index); | |
2389 | |
2390 // if the region has already been claimed or it's not | |
2391 // "continues humongous" we're done | |
2392 if (chr->claim_value() == claim_value || | |
2393 !chr->continuesHumongous()) { | |
2394 break; | |
2395 } | |
2396 | |
2397 // Noone should have claimed it directly. We can given | |
2398 // that we claimed its "starts humongous" region. | |
2399 assert(chr->claim_value() != claim_value, "sanity"); | |
2400 assert(chr->humongous_start_region() == r, "sanity"); | |
2401 | |
2402 if (chr->claimHeapRegion(claim_value)) { | |
2403 // we should always be able to claim it; noone else should | |
2404 // be trying to claim this region | |
2405 | |
2406 bool res2 = cl->doHeapRegion(chr); | |
2407 assert(!res2, "Should not abort"); | |
2408 | |
2409 // Right now, this holds (i.e., no closure that actually | |
2410 // does something with "continues humongous" regions | |
2411 // clears them). We might have to weaken it in the future, | |
2412 // but let's leave these two asserts here for extra safety. | |
2413 assert(chr->continuesHumongous(), "should still be the case"); | |
2414 assert(chr->humongous_start_region() == r, "sanity"); | |
2415 } else { | |
2416 guarantee(false, "we should not reach here"); | |
2417 } | |
2418 } | |
2419 } | |
2420 | |
2421 assert(!r->continuesHumongous(), "sanity"); | |
2422 bool res = cl->doHeapRegion(r); | |
2423 assert(!res, "Should not abort"); | |
2424 } | |
2425 } | |
2426 } | |
2427 | |
390 | 2428 class ResetClaimValuesClosure: public HeapRegionClosure { |
2429 public: | |
2430 bool doHeapRegion(HeapRegion* r) { | |
2431 r->set_claim_value(HeapRegion::InitialClaimValue); | |
2432 return false; | |
2433 } | |
2434 }; | |
2435 | |
2436 void | |
2437 G1CollectedHeap::reset_heap_region_claim_values() { | |
2438 ResetClaimValuesClosure blk; | |
2439 heap_region_iterate(&blk); | |
2440 } | |
2441 | |
#ifdef ASSERT
// This checks whether all regions in the heap have the correct claim
// value. I also piggy-backed on this a check to ensure that the
// humongous_start_region() information on "continues humongous"
// regions is correct.

class CheckClaimValuesClosure : public HeapRegionClosure {
private:
  jint        _claim_value; // the value every region is expected to carry
  size_t      _failures;    // number of mismatches found
  HeapRegion* _sh_region;   // most recent "starts humongous" region seen
public:
  CheckClaimValuesClosure(jint claim_value) :
    _claim_value(claim_value), _failures(0), _sh_region(NULL) { }

  bool doHeapRegion(HeapRegion* r) {
    // Check 1: claim value matches the expected one.
    if (r->claim_value() != _claim_value) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                             "claim value = %d, should be %d",
                             r->bottom(), r->end(), r->claim_value(),
                             _claim_value);
      ++_failures;
    }
    // Check 2: track the current humongous series; any "continues
    // humongous" region must point back to the series' start region.
    if (!r->isHumongous()) {
      _sh_region = NULL;
    } else if (r->startsHumongous()) {
      _sh_region = r;
    } else if (r->continuesHumongous()) {
      if (r->humongous_start_region() != _sh_region) {
        gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
                               "HS = "PTR_FORMAT", should be "PTR_FORMAT,
                               r->bottom(), r->end(),
                               r->humongous_start_region(),
                               _sh_region);
        ++_failures;
      }
    }
    return false; // keep iterating so all failures are reported
  }

  size_t failures() {
    return _failures;
  }
};

// Returns true iff every region in the heap carries claim_value.
bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  CheckClaimValuesClosure cl(claim_value);
  heap_region_iterate(&cl);
  return cl.failures() == 0;
}
#endif // ASSERT
342 | 2491 |
2492 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2493 HeapRegion* r = g1_policy()->collection_set(); | |
2494 while (r != NULL) { | |
2495 HeapRegion* next = r->next_in_collection_set(); | |
2496 if (cl->doHeapRegion(r)) { | |
2497 cl->incomplete(); | |
2498 return; | |
2499 } | |
2500 r = next; | |
2501 } | |
2502 } | |
2503 | |
2504 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
2505 HeapRegionClosure *cl) { | |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2506 if (r == NULL) { |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2507 // The CSet is empty so there's nothing to do. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2508 return; |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2509 } |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
2510 |
342 | 2511 assert(r->in_collection_set(), |
2512 "Start region must be a member of the collection set."); | |
2513 HeapRegion* cur = r; | |
2514 while (cur != NULL) { | |
2515 HeapRegion* next = cur->next_in_collection_set(); | |
2516 if (cl->doHeapRegion(cur) && false) { | |
2517 cl->incomplete(); | |
2518 return; | |
2519 } | |
2520 cur = next; | |
2521 } | |
2522 cur = g1_policy()->collection_set(); | |
2523 while (cur != r) { | |
2524 HeapRegion* next = cur->next_in_collection_set(); | |
2525 if (cl->doHeapRegion(cur) && false) { | |
2526 cl->incomplete(); | |
2527 return; | |
2528 } | |
2529 cur = next; | |
2530 } | |
2531 } | |
2532 | |
2533 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
2534 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
2535 } | |
2536 | |
2537 | |
2538 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2539 Space* res = heap_region_containing(addr); | |
2540 if (res == NULL) | |
2541 res = perm_gen()->space_containing(addr); | |
2542 return res; | |
2543 } | |
2544 | |
2545 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2546 Space* sp = space_containing(addr); | |
2547 if (sp != NULL) { | |
2548 return sp->block_start(addr); | |
2549 } | |
2550 return NULL; | |
2551 } | |
2552 | |
2553 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
2554 Space* sp = space_containing(addr); | |
2555 assert(sp != NULL, "block_size of address outside of heap"); | |
2556 return sp->block_size(addr); | |
2557 } | |
2558 | |
2559 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2560 Space* sp = space_containing(addr); | |
2561 return sp->block_is_obj(addr); | |
2562 } | |
2563 | |
2564 bool G1CollectedHeap::supports_tlab_allocation() const { | |
2565 return true; | |
2566 } | |
2567 | |
2568 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
2569 return HeapRegion::GrainBytes; | |
2570 } | |
2571 | |
2572 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
2573 // Return the remaining space in the cur alloc region, but not less than | |
2574 // the min TLAB size. | |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2575 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2576 // Also, this value can be at most the humongous object threshold, |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2577 // since we can't allow tlabs to grow big enough to accomodate |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2578 // humongous objects. |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2579 |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2580 // We need to store the cur alloc region locally, since it might change |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2581 // between when we test for NULL and when we use it later. |
342 | 2582 ContiguousSpace* cur_alloc_space = _cur_alloc_region; |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2583 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2584 |
342 | 2585 if (cur_alloc_space == NULL) { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2586 return max_tlab_size; |
342 | 2587 } else { |
1313
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2588 return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize), |
664ae0c5e0e5
6755988: G1: assert(new_obj != 0 || ... "should be forwarded")
johnc
parents:
1282
diff
changeset
|
2589 max_tlab_size); |
342 | 2590 } |
2591 } | |
2592 | |
2593 bool G1CollectedHeap::allocs_are_zero_filled() { | |
2594 return false; | |
2595 } | |
2596 | |
2597 size_t G1CollectedHeap::large_typearray_limit() { | |
2598 // FIXME | |
2599 return HeapRegion::GrainBytes/HeapWordSize; | |
2600 } | |
2601 | |
2602 size_t G1CollectedHeap::max_capacity() const { | |
1092
ed52bcc32739
6880903: G1: G1 reports incorrect Runtime.maxMemory()
tonyp
parents:
1089
diff
changeset
|
2603 return g1_reserved_obj_bytes(); |
342 | 2604 } |
2605 | |
2606 jlong G1CollectedHeap::millis_since_last_gc() { | |
2607 // assert(false, "NYI"); | |
2608 return 0; | |
2609 } | |
2610 | |
2611 | |
2612 void G1CollectedHeap::prepare_for_verify() { | |
2613 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2614 ensure_parsability(false); | |
2615 } | |
2616 g1_rem_set()->prepare_for_verify(); | |
2617 } | |
2618 | |
2619 class VerifyLivenessOopClosure: public OopClosure { | |
2620 G1CollectedHeap* g1h; | |
2621 public: | |
2622 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
2623 g1h = _g1h; | |
2624 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2625 void do_oop(narrowOop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2626 void do_oop( oop *p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2627 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2628 template <class T> void do_oop_work(T *p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2629 oop obj = oopDesc::load_decode_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2630 guarantee(obj == NULL || !g1h->is_obj_dead(obj), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2631 "Dead object referenced by a not dead object"); |
342 | 2632 } |
2633 }; | |
2634 | |
2635 class VerifyObjsInRegionClosure: public ObjectClosure { | |
811 | 2636 private: |
342 | 2637 G1CollectedHeap* _g1h; |
2638 size_t _live_bytes; | |
2639 HeapRegion *_hr; | |
811 | 2640 bool _use_prev_marking; |
342 | 2641 public: |
811 | 2642 // use_prev_marking == true -> use "prev" marking information, |
2643 // use_prev_marking == false -> use "next" marking information | |
2644 VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) | |
2645 : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { | |
342 | 2646 _g1h = G1CollectedHeap::heap(); |
2647 } | |
2648 void do_object(oop o) { | |
2649 VerifyLivenessOopClosure isLive(_g1h); | |
2650 assert(o != NULL, "Huh?"); | |
811 | 2651 if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { |
342 | 2652 o->oop_iterate(&isLive); |
1389
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2653 if (!_hr->obj_allocated_since_prev_marking(o)) { |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2654 size_t obj_size = o->size(); // Make sure we don't overflow |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2655 _live_bytes += (obj_size * HeapWordSize); |
5dbd9300cf9c
6943926: G1: Integer overflow during heap region verification
johnc
parents:
1388
diff
changeset
|
2656 } |
342 | 2657 } |
2658 } | |
2659 size_t live_bytes() { return _live_bytes; } | |
2660 }; | |
2661 | |
2662 class PrintObjsInRegionClosure : public ObjectClosure { | |
2663 HeapRegion *_hr; | |
2664 G1CollectedHeap *_g1; | |
2665 public: | |
2666 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2667 _g1 = G1CollectedHeap::heap(); | |
2668 }; | |
2669 | |
2670 void do_object(oop o) { | |
2671 if (o != NULL) { | |
2672 HeapWord *start = (HeapWord *) o; | |
2673 size_t word_sz = o->size(); | |
2674 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2675 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2676 (void*) o, word_sz, | |
2677 _g1->isMarkedPrev(o), | |
2678 _g1->isMarkedNext(o), | |
2679 _hr->obj_allocated_since_prev_marking(o)); | |
2680 HeapWord *end = start + word_sz; | |
2681 HeapWord *cur; | |
2682 int *val; | |
2683 for (cur = start; cur < end; cur++) { | |
2684 val = (int *) cur; | |
2685 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2686 } | |
2687 } | |
2688 } | |
2689 }; | |
2690 | |
2691 class VerifyRegionClosure: public HeapRegionClosure { | |
811 | 2692 private: |
342 | 2693 bool _allow_dirty; |
390 | 2694 bool _par; |
811 | 2695 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2696 bool _failures; |
811 | 2697 public: |
2698 // use_prev_marking == true -> use "prev" marking information, | |
2699 // use_prev_marking == false -> use "next" marking information | |
2700 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2701 : _allow_dirty(allow_dirty), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2702 _par(par), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2703 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2704 _failures(false) {} |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2705 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2706 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2707 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2708 } |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2709 |
342 | 2710 bool doHeapRegion(HeapRegion* r) { |
390 | 2711 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2712 "Should be unclaimed at verify points."); | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2713 if (!r->continuesHumongous()) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2714 bool failures = false; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2715 r->verify(_allow_dirty, _use_prev_marking, &failures); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2716 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2717 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2718 } else { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2719 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2720 r->object_iterate(¬_dead_yet_cl); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2721 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2722 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2723 "max_live_bytes "SIZE_FORMAT" " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2724 "< calculated "SIZE_FORMAT, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2725 r->bottom(), r->end(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2726 r->max_live_bytes(), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2727 not_dead_yet_cl.live_bytes()); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2728 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2729 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2730 } |
342 | 2731 } |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2732 return false; // stop the region iteration if we hit a failure |
342 | 2733 } |
2734 }; | |
2735 | |
2736 class VerifyRootsClosure: public OopsInGenClosure { | |
2737 private: | |
2738 G1CollectedHeap* _g1h; | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2739 bool _use_prev_marking; |
342 | 2740 bool _failures; |
2741 public: | |
811 | 2742 // use_prev_marking == true -> use "prev" marking information, |
2743 // use_prev_marking == false -> use "next" marking information | |
2744 VerifyRootsClosure(bool use_prev_marking) : | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2745 _g1h(G1CollectedHeap::heap()), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2746 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2747 _failures(false) { } |
342 | 2748 |
2749 bool failures() { return _failures; } | |
2750 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2751 template <class T> void do_oop_nv(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2752 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2753 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2754 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
811 | 2755 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { |
342 | 2756 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2757 "points to dead obj "PTR_FORMAT, p, (void*) obj); |
342 | 2758 obj->print_on(gclog_or_tty); |
2759 _failures = true; | |
2760 } | |
2761 } | |
2762 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2763 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2764 void do_oop(oop* p) { do_oop_nv(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2765 void do_oop(narrowOop* p) { do_oop_nv(p); } |
342 | 2766 }; |
2767 | |
390 | 2768 // This is the task used for parallel heap verification. |
2769 | |
2770 class G1ParVerifyTask: public AbstractGangTask { | |
2771 private: | |
2772 G1CollectedHeap* _g1h; | |
2773 bool _allow_dirty; | |
811 | 2774 bool _use_prev_marking; |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2775 bool _failures; |
390 | 2776 |
2777 public: | |
811 | 2778 // use_prev_marking == true -> use "prev" marking information, |
2779 // use_prev_marking == false -> use "next" marking information | |
2780 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, | |
2781 bool use_prev_marking) : | |
390 | 2782 AbstractGangTask("Parallel verify task"), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2783 _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
2784 _allow_dirty(allow_dirty), |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2785 _use_prev_marking(use_prev_marking), |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2786 _failures(false) { } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2787 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2788 bool failures() { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2789 return _failures; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2790 } |
390 | 2791 |
2792 void work(int worker_i) { | |
637
25e146966e7c
6817419: G1: Enable extensive verification for humongous regions
iveresov
parents:
636
diff
changeset
|
2793 HandleMark hm; |
811 | 2794 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); |
390 | 2795 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
2796 HeapRegion::ParVerifyClaimValue); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2797 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2798 _failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2799 } |
390 | 2800 } |
2801 }; | |
2802 | |
342 | 2803 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
811 | 2804 verify(allow_dirty, silent, /* use_prev_marking */ true); |
2805 } | |
2806 | |
2807 void G1CollectedHeap::verify(bool allow_dirty, | |
2808 bool silent, | |
2809 bool use_prev_marking) { | |
342 | 2810 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
2811 if (!silent) { gclog_or_tty->print("roots "); } | |
811 | 2812 VerifyRootsClosure rootsCl(use_prev_marking); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2813 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2814 process_strong_roots(true, // activate StrongRootsScope |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2815 false, |
342 | 2816 SharedHeap::SO_AllClasses, |
2817 &rootsCl, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
2818 &blobsCl, |
342 | 2819 &rootsCl); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2820 bool failures = rootsCl.failures(); |
342 | 2821 rem_set()->invalidate(perm_gen()->used_region(), false); |
2822 if (!silent) { gclog_or_tty->print("heapRegions "); } | |
390 | 2823 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2824 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2825 "sanity check"); | |
2826 | |
811 | 2827 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
390 | 2828 int n_workers = workers()->total_workers(); |
2829 set_par_threads(n_workers); | |
2830 workers()->run_task(&task); | |
2831 set_par_threads(0); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2832 if (task.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2833 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2834 } |
390 | 2835 |
2836 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2837 "sanity check"); | |
2838 | |
2839 reset_heap_region_claim_values(); | |
2840 | |
2841 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2842 "sanity check"); | |
2843 } else { | |
811 | 2844 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
390 | 2845 _hrs->iterate(&blk); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2846 if (blk.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2847 failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2848 } |
390 | 2849 } |
342 | 2850 if (!silent) gclog_or_tty->print("remset "); |
2851 rem_set()->verify(); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2852 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2853 if (failures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2854 gclog_or_tty->print_cr("Heap:"); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2855 print_on(gclog_or_tty, true /* extended */); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2856 gclog_or_tty->print_cr(""); |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2857 #ifndef PRODUCT |
1044 | 2858 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
1388 | 2859 concurrent_mark()->print_reachable("at-verification-failure", |
2860 use_prev_marking, false /* all */); | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2861 } |
1547
fb1a39993f69
6951319: enable solaris builds using Sun Studio 12 update 1
jcoomes
parents:
1545
diff
changeset
|
2862 #endif |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2863 gclog_or_tty->flush(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2864 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2865 guarantee(!failures, "there should not have been any failures"); |
342 | 2866 } else { |
2867 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2868 } | |
2869 } | |
2870 | |
2871 class PrintRegionClosure: public HeapRegionClosure { | |
2872 outputStream* _st; | |
2873 public: | |
2874 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2875 bool doHeapRegion(HeapRegion* r) { | |
2876 r->print_on(_st); | |
2877 return false; | |
2878 } | |
2879 }; | |
2880 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2881 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 2882 |
2883 void G1CollectedHeap::print_on(outputStream* st) const { | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2884 print_on(st, PrintHeapAtGCExtended); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2885 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2886 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2887 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2888 st->print(" %-20s", "garbage-first heap"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2889 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846
42d84bbbecf4
6859911: G1: assert(Heap_lock->owner() = NULL, "Should be owned on this thread's behalf")
tonyp
parents:
845
diff
changeset
|
2890 capacity()/K, used_unlocked()/K); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2891 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2892 _g1_storage.low_boundary(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2893 _g1_storage.high(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2894 _g1_storage.high_boundary()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2895 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2896 st->print(" region size " SIZE_FORMAT "K, ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2897 HeapRegion::GrainBytes/K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2898 size_t young_regions = _young_list->length(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2899 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2900 young_regions, young_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2901 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2902 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2903 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2904 st->cr(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2905 perm()->as_gen()->print_on(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2906 if (extended) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
1019
diff
changeset
|
2907 st->cr(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2908 print_on_extended(st); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2909 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2910 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2911 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
2912 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 2913 PrintRegionClosure blk(st); |
2914 _hrs->iterate(&blk); | |
2915 } | |
2916 | |
2917 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2918 if (G1CollectedHeap::use_parallel_gc_threads()) { |
1019 | 2919 workers()->print_worker_threads_on(st); |
2920 } | |
2921 | |
2922 _cmThread->print_on(st); | |
342 | 2923 st->cr(); |
1019 | 2924 |
2925 _cm->print_worker_threads_on(st); | |
2926 | |
2927 _cg1r->print_worker_threads_on(st); | |
2928 | |
342 | 2929 _czft->print_on(st); |
2930 st->cr(); | |
2931 } | |
2932 | |
2933 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
2934 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 2935 workers()->threads_do(tc); |
2936 } | |
2937 tc->do_thread(_cmThread); | |
794 | 2938 _cg1r->threads_do(tc); |
342 | 2939 tc->do_thread(_czft); |
2940 } | |
2941 | |
2942 void G1CollectedHeap::print_tracing_info() const { | |
2943 // We'll overload this to mean "trace GC pause statistics." | |
2944 if (TraceGen0Time || TraceGen1Time) { | |
2945 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2946 // to that. | |
2947 g1_policy()->print_tracing_info(); | |
2948 } | |
751 | 2949 if (G1SummarizeRSetStats) { |
342 | 2950 g1_rem_set()->print_summary_info(); |
2951 } | |
1282 | 2952 if (G1SummarizeConcMark) { |
342 | 2953 concurrent_mark()->print_summary_info(); |
2954 } | |
751 | 2955 if (G1SummarizeZFStats) { |
342 | 2956 ConcurrentZFThread::print_summary_info(); |
2957 } | |
2958 g1_policy()->print_yg_surv_rate_info(); | |
2959 | |
2960 SpecializationStats::print(); | |
2961 } | |
2962 | |
2963 | |
2964 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2965 HeapRegion* hr = heap_region_containing(addr); | |
2966 if (hr == NULL) { | |
2967 return 0; | |
2968 } else { | |
2969 return 1; | |
2970 } | |
2971 } | |
2972 | |
2973 G1CollectedHeap* G1CollectedHeap::heap() { | |
2974 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
2975 "not a garbage-first heap"); | |
2976 return _g1h; | |
2977 } | |
2978 | |
2979 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
2980 // always_do_update_barrier = false; |
342 | 2981 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
2982 // Call allocation profiler | |
2983 AllocationProfiler::iterate_since_last_gc(); | |
2984 // Fill TLAB's and such | |
2985 ensure_parsability(true); | |
2986 } | |
2987 | |
2988 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
2989 // FIXME: what is this about? | |
2990 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
2991 // is set. | |
2992 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
2993 "derived pointer present")); | |
1245
6484c4ee11cb
6904516: More object array barrier fixes, following up on 6906727
ysr
parents:
1166
diff
changeset
|
2994 // always_do_update_barrier = true; |
342 | 2995 } |
2996 | |
1973 | 2997 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, |
2998 unsigned int gc_count_before, | |
2999 bool* succeeded) { | |
3000 assert_heap_not_locked_and_not_at_safepoint(); | |
342 | 3001 g1_policy()->record_stop_world_start(); |
1973 | 3002 VM_G1IncCollectionPause op(gc_count_before, |
3003 word_size, | |
3004 false, /* should_initiate_conc_mark */ | |
3005 g1_policy()->max_pause_time_ms(), | |
3006 GCCause::_g1_inc_collection_pause); | |
3007 VMThread::execute(&op); | |
3008 | |
3009 HeapWord* result = op.result(); | |
3010 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded(); | |
3011 assert(result == NULL || ret_succeeded, | |
3012 "the result should be NULL if the VM did not succeed"); | |
3013 *succeeded = ret_succeeded; | |
3014 | |
3015 assert_heap_not_locked(); | |
3016 return result; | |
342 | 3017 } |
3018 | |
3019 void | |
3020 G1CollectedHeap::doConcurrentMark() { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3021 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3022 if (!_cmThread->in_progress()) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3023 _cmThread->set_started(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3024 CGC_lock->notify(); |
342 | 3025 } |
3026 } | |
3027 | |
3028 class VerifyMarkedObjsClosure: public ObjectClosure { | |
3029 G1CollectedHeap* _g1h; | |
3030 public: | |
3031 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
3032 void do_object(oop obj) { | |
3033 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
3034 "markandsweep mark should agree with concurrent deadness"); | |
3035 } | |
3036 }; | |
3037 | |
3038 void | |
3039 G1CollectedHeap::checkConcurrentMark() { | |
3040 VerifyMarkedObjsClosure verifycl(this); | |
3041 // MutexLockerEx x(getMarkBitMapLock(), | |
3042 // Mutex::_no_safepoint_check_flag); | |
678 | 3043 object_iterate(&verifycl, false); |
342 | 3044 } |
3045 | |
3046 void G1CollectedHeap::do_sync_mark() { | |
3047 _cm->checkpointRootsInitial(); | |
3048 _cm->markFromRoots(); | |
3049 _cm->checkpointRootsFinal(false); | |
3050 } | |
3051 | |
3052 // <NEW PREDICTION> | |
3053 | |
3054 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
3055 bool young) { | |
3056 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
3057 } | |
3058 | |
3059 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
3060 predicted_time_ms) { | |
3061 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
3062 } | |
3063 | |
3064 size_t G1CollectedHeap::pending_card_num() { | |
3065 size_t extra_cards = 0; | |
3066 JavaThread *curr = Threads::first(); | |
3067 while (curr != NULL) { | |
3068 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
3069 extra_cards += dcq.size(); | |
3070 curr = curr->next(); | |
3071 } | |
3072 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3073 size_t buffer_size = dcqs.buffer_size(); | |
3074 size_t buffer_num = dcqs.completed_buffers_num(); | |
3075 return buffer_size * buffer_num + extra_cards; | |
3076 } | |
3077 | |
3078 size_t G1CollectedHeap::max_pending_card_num() { | |
3079 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
3080 size_t buffer_size = dcqs.buffer_size(); | |
3081 size_t buffer_num = dcqs.completed_buffers_num(); | |
3082 int thread_num = Threads::number_of_threads(); | |
3083 return (buffer_num + thread_num) * buffer_size; | |
3084 } | |
3085 | |
3086 size_t G1CollectedHeap::cards_scanned() { | |
1861 | 3087 return g1_rem_set()->cardsScanned(); |
342 | 3088 } |
3089 | |
3090 void | |
3091 G1CollectedHeap::setup_surviving_young_words() { | |
3092 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
3093 size_t array_length = g1_policy()->young_cset_length(); | |
3094 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3095 if (_surviving_young_words == NULL) { | |
3096 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
3097 "Not enough space for young surv words summary."); | |
3098 } | |
3099 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3100 #ifdef ASSERT |
342 | 3101 for (size_t i = 0; i < array_length; ++i) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3102 assert( _surviving_young_words[i] == 0, "memset above" ); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3103 } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3104 #endif // !ASSERT |
342 | 3105 } |
3106 | |
3107 void | |
3108 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
3109 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
3110 size_t array_length = g1_policy()->young_cset_length(); | |
3111 for (size_t i = 0; i < array_length; ++i) | |
3112 _surviving_young_words[i] += surv_young_words[i]; | |
3113 } | |
3114 | |
3115 void | |
3116 G1CollectedHeap::cleanup_surviving_young_words() { | |
3117 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
3118 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
3119 _surviving_young_words = NULL; | |
3120 } | |
3121 | |
3122 // </NEW PREDICTION> | |
3123 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3124 struct PrepareForRSScanningClosure : public HeapRegionClosure { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3125 bool doHeapRegion(HeapRegion *r) { |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3126 r->rem_set()->set_iter_claimed(0); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3127 return false; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3128 } |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3129 }; |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
3130 |
1709 | 3131 #if TASKQUEUE_STATS |
3132 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { | |
3133 st->print_raw_cr("GC Task Stats"); | |
3134 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); | |
3135 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); | |
3136 } | |
3137 | |
3138 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { | |
3139 print_taskqueue_stats_hdr(st); | |
3140 | |
3141 TaskQueueStats totals; | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3142 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3143 for (int i = 0; i < n; ++i) { |
3144 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); | |
3145 totals += task_queue(i)->stats; | |
3146 } | |
3147 st->print_raw("tot "); totals.print(st); st->cr(); | |
3148 | |
3149 DEBUG_ONLY(totals.verify()); | |
3150 } | |
3151 | |
3152 void G1CollectedHeap::reset_taskqueue_stats() { | |
1755
8e5955ddf8e4
6978300: G1: debug builds crash if ParallelGCThreads==0
jcoomes
parents:
1719
diff
changeset
|
3153 const int n = workers() != NULL ? workers()->total_workers() : 1; |
1709 | 3154 for (int i = 0; i < n; ++i) { |
3155 task_queue(i)->stats.reset(); | |
3156 } | |
3157 } | |
3158 #endif // TASKQUEUE_STATS | |
3159 | |
1973 | 3160 bool |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3161 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3162 if (GC_locker::check_active_before_gc()) { |
1973 | 3163 return false; |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3164 } |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3165 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3166 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3167 Universe::print_heap_before_gc(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3168 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3169 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3170 { |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3171 ResourceMark rm; |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3172 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3173 // This call will decide whether this pause is an initial-mark |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3174 // pause. If it is, during_initial_mark_pause() will return true |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3175 // for the duration of this pause. |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3176 g1_policy()->decide_on_conc_mark_initiation(); |
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3177 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3178 char verbose_str[128]; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3179 sprintf(verbose_str, "GC pause "); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3180 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3181 if (g1_policy()->full_young_gcs()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3182 strcat(verbose_str, "(young)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3183 else |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3184 strcat(verbose_str, "(partial)"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3185 } |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3186 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3187 strcat(verbose_str, " (initial-mark)"); |
1656
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3188 // We are about to start a marking cycle, so we increment the |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3189 // full collection counter. |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3190 increment_total_full_collections(); |
4e5661ba9d98
6944166: G1: explicit GCs are not always handled correctly
tonyp
parents:
1611
diff
changeset
|
3191 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3192 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3193 // if PrintGCDetails is on, we'll print long statistics information |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3194 // in the collector policy code, so let's not print this as the output |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3195 // is messy if we do. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3196 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3197 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3198 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3199 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3200 TraceMemoryManagerStats tms(false /* fullGC */); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3201 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3202 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3203 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3204 guarantee(!is_gc_active(), "collection is not reentrant"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3205 assert(regions_accounted_for(), "Region leakage!"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3206 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3207 increment_gc_time_stamp(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3208 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3209 if (g1_policy()->in_young_gc_mode()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3210 assert(check_young_list_well_formed(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3211 "young list should be well formed"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3212 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3213 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3214 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3215 IsGCActiveMark x; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3216 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3217 gc_prologue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3218 increment_total_collections(false /* full gc */); |
342 | 3219 |
3220 #if G1_REM_SET_LOGGING | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3221 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3222 print(); |
342 | 3223 #endif |
3224 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3225 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3226 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3227 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3228 gclog_or_tty->print(" VerifyBeforeGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3229 Universe::verify(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3230 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3231 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3232 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3233 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3234 // We want to turn off ref discovery, if necessary, and turn it back on |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3235 // on again later if we do. XXX Dubious: why is discovery disabled? |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3236 bool was_enabled = ref_processor()->discovery_enabled(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3237 if (was_enabled) ref_processor()->disable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3238 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3239 // Forget the current alloc region (we might even choose it to be part |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3240 // of the collection set!). |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3241 abandon_cur_alloc_region(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3242 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3243 // The elapsed time induced by the start time below deliberately elides |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3244 // the possible verification above. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3245 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3246 size_t start_used_bytes = used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3247 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3248 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3249 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3250 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3251 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3252 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3253 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3254 g1_policy()->record_collection_pause_start(start_time_sec, |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3255 start_used_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3256 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3257 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3258 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3259 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3260 #endif // YOUNG_LIST_VERBOSE |
342 | 3261 |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3262 if (g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3263 concurrent_mark()->checkpointRootsInitialPre(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3264 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3265 save_marks(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3266 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3267 // We must do this before any possible evacuation that should propagate |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3268 // marks. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3269 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3270 double start_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3271 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3272 _cm->drainAllSATBBuffers(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3273 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3274 g1_policy()->record_satb_drain_time(finish_mark_ms); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3275 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3276 // Record the number of elements currently on the mark stack, so we |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3277 // only iterate over these. (Since evacuation may add to the mark |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3278 // stack, doing more exposes race conditions.) If no mark is in |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3279 // progress, this will be zero. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3280 _cm->set_oops_do_bound(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3281 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3282 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3283 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3284 if (mark_in_progress()) |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3285 concurrent_mark()->newCSet(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3286 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3287 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3288 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3289 _young_list->print(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3290 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3291 #endif // YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3292 |
1707 | 3293 g1_policy()->choose_collection_set(target_pause_time_ms); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3294 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3295 // Nothing to do if we were unable to choose a collection set. |
342 | 3296 #if G1_REM_SET_LOGGING |
1707 | 3297 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
3298 print(); | |
342 | 3299 #endif |
1707 | 3300 PrepareForRSScanningClosure prepare_for_rs_scan; |
3301 collection_set_iterate(&prepare_for_rs_scan); | |
3302 | |
3303 setup_surviving_young_words(); | |
3304 | |
3305 // Set up the gc allocation regions. | |
3306 get_gc_alloc_regions(); | |
3307 | |
3308 // Actually do the work... | |
3309 evacuate_collection_set(); | |
3310 | |
3311 free_collection_set(g1_policy()->collection_set()); | |
3312 g1_policy()->clear_collection_set(); | |
3313 | |
3314 cleanup_surviving_young_words(); | |
3315 | |
3316 // Start a new incremental collection set for the next pause. | |
3317 g1_policy()->start_incremental_cset_building(); | |
3318 | |
3319 // Clear the _cset_fast_test bitmap in anticipation of adding | |
3320 // regions to the incremental collection set for the next | |
3321 // evacuation pause. | |
3322 clear_cset_fast_test(); | |
3323 | |
3324 if (g1_policy()->in_young_gc_mode()) { | |
3325 _young_list->reset_sampled_info(); | |
3326 | |
3327 // Don't check the whole heap at this point as the | |
3328 // GC alloc regions from this pause have been tagged | |
3329 // as survivors and moved on to the survivor list. | |
3330 // Survivor regions will fail the !is_young() check. | |
3331 assert(check_young_list_empty(false /* check_heap */), | |
3332 "young list should be empty"); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3333 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3334 #if YOUNG_LIST_VERBOSE |
1707 | 3335 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
3336 _young_list->print(); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3337 #endif // YOUNG_LIST_VERBOSE |
342 | 3338 |
1707 | 3339 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3340 _young_list->first_survivor_region(), |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3341 _young_list->last_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3342 |
1707 | 3343 _young_list->reset_auxilary_lists(); |
342 | 3344 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3345 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3346 if (evacuation_failed()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3347 _summary_bytes_used = recalculate_used(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3348 } else { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3349 // The "used" of the the collection set have already been subtracted |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3350 // when they were freed. Add in the bytes evacuated. |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3351 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3352 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3353 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3354 if (g1_policy()->in_young_gc_mode() && |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
3355 g1_policy()->during_initial_mark_pause()) { |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3356 concurrent_mark()->checkpointRootsInitialPost(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3357 set_marking_started(); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3358 // CAUTION: after the doConcurrentMark() call below, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3359 // the concurrent marking thread(s) could be running |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3360 // concurrently with us. Make sure that anything after |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3361 // this point does not assume that we are the only GC thread |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3362 // running. Note: of course, the actual marking work will |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3363 // not start until the safepoint itself is released in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3364 // ConcurrentGCThread::safepoint_desynchronize(). |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3365 doConcurrentMark(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3366 } |
342 | 3367 |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3368 #if YOUNG_LIST_VERBOSE |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3369 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3370 _young_list->print(); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3371 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
3372 #endif // YOUNG_LIST_VERBOSE |
342 | 3373 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3374 double end_time_sec = os::elapsedTime(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3375 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3376 g1_policy()->record_pause_time_ms(pause_time_ms); |
1707 | 3377 g1_policy()->record_collection_pause_end(); |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3378 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3379 assert(regions_accounted_for(), "Region leakage."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3380 |
1089
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3381 MemoryService::track_memory_usage(); |
db0d5eba9d20
6815790: G1: Missing MemoryPoolMXBeans with -XX:+UseG1GC
tonyp
parents:
1088
diff
changeset
|
3382 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3383 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3384 HandleMark hm; // Discard invalid handles created during verification |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3385 gclog_or_tty->print(" VerifyAfterGC:"); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3386 prepare_for_verify(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3387 Universe::verify(false); |
342 | 3388 } |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3389 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3390 if (was_enabled) ref_processor()->enable_discovery(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3391 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3392 { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3393 size_t expand_bytes = g1_policy()->expansion_amount(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3394 if (expand_bytes > 0) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3395 size_t bytes_before = capacity(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3396 expand(expand_bytes); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3397 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3398 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3399 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3400 if (mark_in_progress()) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3401 concurrent_mark()->update_g1_committed(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3402 } |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3403 |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3404 #ifdef TRACESPINNING |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3405 ParallelTaskTerminator::print_termination_counts(); |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
545
diff
changeset
|
3406 #endif |
342 | 3407 |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3408 gc_epilogue(false); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3409 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3410 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3411 assert(verify_region_lists(), "Bad region lists."); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3412 |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3413 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3414 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3415 print_tracing_info(); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3416 vm_exit(-1); |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3417 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3418 } |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3419 |
1709 | 3420 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
3421 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); | |
3422 | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3423 if (PrintHeapAtGC) { |
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
3424 Universe::print_heap_after_gc(); |
342 | 3425 } |
884
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3426 if (G1SummarizeRSetStats && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3427 (G1SummarizeRSetStatsPeriod > 0) && |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3428 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3429 g1_rem_set()->print_summary_info(); |
83b687ce3090
6866591: G1: print update buffer processing stats more often
tonyp
parents:
883
diff
changeset
|
3430 } |
1973 | 3431 |
3432 return true; | |
342 | 3433 } |
3434 | |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3435 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3436 { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3437 size_t gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3438 switch (purpose) { |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3439 case GCAllocForSurvived: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3440 gclab_word_size = YoungPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3441 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3442 case GCAllocForTenured: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3443 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3444 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3445 default: |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3446 assert(false, "unknown GCAllocPurpose"); |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3447 gclab_word_size = OldPLABSize; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3448 break; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3449 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3450 return gclab_word_size; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3451 } |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3452 |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
3453 |
// Install region r (possibly NULL) as the GC alloc region for the given
// allocation purpose, replacing — and re-aliasing — whatever region was
// installed before. If marking is in progress, objects allocated in r
// since next_top_at_mark_start() are marked explicitly so they survive
// the NTAMS update at the end of the pause.
void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
  assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
  // make sure we don't call set_gc_alloc_region() multiple times on
  // the same region
  assert(r == NULL || !r->is_gc_alloc_region(),
         "shouldn't already be a GC alloc region");
  assert(r == NULL || !r->isHumongous(),
         "humongous regions shouldn't be used as GC alloc regions");

  HeapWord* original_top = NULL;
  if (r != NULL)
    original_top = r->top();

  // We will want to record the used space in r as being there before gc.
  // Once we install it as a GC alloc region it's eligible for allocation.
  // So record it now and use it later.
  size_t r_used = 0;
  if (r != NULL) {
    r_used = r->used();

    if (G1CollectedHeap::use_parallel_gc_threads()) {
      // need to take the lock to guard against two threads calling
      // get_gc_alloc_region concurrently (very unlikely but...)
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      r->save_marks();
    }
  }
  HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
  _gc_alloc_regions[purpose] = r;
  if (old_alloc_region != NULL) {
    // Replace aliases too: other purposes may have been sharing the old
    // region via the alternative-purpose fallback set up in
    // get_gc_alloc_regions().
    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
      if (_gc_alloc_regions[ap] == old_alloc_region) {
        _gc_alloc_regions[ap] = r;
      }
    }
  }
  if (r != NULL) {
    push_gc_alloc_region(r);
    if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
      // We are using a region as a GC alloc region after it has been used
      // as a mutator allocation region during the current marking cycle.
      // The mutator-allocated objects are currently implicitly marked, but
      // when we move hr->next_top_at_mark_start() forward at the end
      // of the GC pause, they won't be. We therefore mark all objects in
      // the "gap". We do this object-by-object, since marking densely
      // does not currently work right with marking bitmap iteration. This
      // means we rely on TLAB filling at the start of pauses, and no
      // "resuscitation" of filled TLAB's. If we want to do this, we need
      // to fix the marking bitmap iteration.
      HeapWord* curhw = r->next_top_at_mark_start();
      HeapWord* t = original_top;

      // Walk the gap object-by-object; cur->size() advances to the next
      // object header, so the region must be parsable over [curhw, t).
      while (curhw < t) {
        oop cur = (oop)curhw;
        // We'll assume parallel for generality. This is rare code.
        concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
        curhw = curhw + cur->size();
      }
      assert(curhw == t, "Should have parsed correctly.");
    }
    if (G1PolicyVerbose > 1) {
      gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
                          "for survivors:", r->bottom(), original_top, r->end());
      r->print();
    }
    // Report the pre-GC occupancy recorded above to the policy.
    g1_policy()->record_before_bytes(r_used);
  }
}
3523 | |
// Tag hr as a GC alloc region and splice it onto the head of the
// singly-linked list (_gc_alloc_region_list) of regions that have
// served as GC alloc regions during the current pause. Callers must
// be the VM thread or hold the par-alloc-during-GC lock.
void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
  assert(Thread::current()->is_VM_thread() ||
         par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
  assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
         "Precondition.");
  hr->set_is_gc_alloc_region(true);
  // Link in as the new list head.
  hr->set_next_gc_alloc_region(_gc_alloc_region_list);
  _gc_alloc_region_list = hr;
}
3533 | |
#ifdef G1_DEBUG
// Debug-only closure: reports any region that is still tagged as a
// GC alloc region. Used after forget_alloc_region_list() to verify
// that no region keeps the tag outside a GC pause.
class FindGCAllocRegion: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_gc_alloc_region()) {
      gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
                             r->hrs_index(), r->bottom());
    }
    // Returning false continues the iteration over all regions.
    return false;
  }
};
#endif // G1_DEBUG
3546 | |
// Tear down the list of regions used as GC alloc regions during this
// pause: untag each region, hand non-empty survivor regions back to
// the young list, and count emptied regions as free. VM-thread only.
void G1CollectedHeap::forget_alloc_region_list() {
  assert(Thread::current()->is_VM_thread(), "Precondition");
  while (_gc_alloc_region_list != NULL) {
    HeapRegion* r = _gc_alloc_region_list;
    assert(r->is_gc_alloc_region(), "Invariant.");
    // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
    // newly allocated data in order to be able to apply deferred updates
    // before the GC is done for verification purposes (i.e to allow
    // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
    // collection.
    r->ContiguousSpace::set_saved_mark();
    // Unlink r from the list and clear its GC-alloc state.
    _gc_alloc_region_list = r->next_gc_alloc_region();
    r->set_next_gc_alloc_region(NULL);
    r->set_is_gc_alloc_region(false);
    if (r->is_survivor()) {
      if (r->is_empty()) {
        // Nothing survived into it; it is no longer young.
        r->set_not_young();
      } else {
        _young_list->add_survivor_region(r);
      }
    }
    if (r->is_empty()) {
      ++_free_regions;
    }
  }
#ifdef G1_DEBUG
  // Verify that no region anywhere is still tagged as a GC alloc region.
  FindGCAllocRegion fa;
  heap_region_iterate(&fa);
#endif // G1_DEBUG
}
3577 | |
3578 | |
// Placeholder sanity check for the GC alloc regions; currently always
// passes (used inside asserts in get_gc_alloc_regions()).
bool G1CollectedHeap::check_gc_alloc_regions() {
  // TODO: allocation regions check
  return true;
}
3583 | |
// Set up the GC alloc regions for this pause: for each allocation
// purpose, reuse the region retained from the previous pause if it is
// still usable, otherwise allocate a fresh region (expanding the heap
// if needed). Finally, alias purposes that got no region of their own
// to their alternative purpose's region.
void G1CollectedHeap::get_gc_alloc_regions() {
  // First, let's check that the GC alloc region list is empty (it should)
  assert(_gc_alloc_region_list == NULL, "invariant");

  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    assert(_gc_alloc_regions[ap] == NULL, "invariant");
    assert(_gc_alloc_region_counts[ap] == 0, "invariant");

    // Create new GC alloc regions.
    HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
    _retained_gc_alloc_regions[ap] = NULL;

    if (alloc_region != NULL) {
      assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");

      // let's make sure that the GC alloc region is not tagged as such
      // outside a GC operation
      assert(!alloc_region->is_gc_alloc_region(), "sanity");

      if (alloc_region->in_collection_set() ||
          alloc_region->top() == alloc_region->end() ||
          alloc_region->top() == alloc_region->bottom() ||
          alloc_region->isHumongous()) {
        // we will discard the current GC alloc region if
        // * it's in the collection set (it can happen!),
        // * it's already full (no point in using it),
        // * it's empty (this means that it was emptied during
        // a cleanup and it should be on the free list now), or
        // * it's humongous (this means that it was emptied
        // during a cleanup and was added to the free list, but
        // has been subsequently used to allocate a humongous
        // object that may be less than the region size).

        alloc_region = NULL;
      }
    }

    if (alloc_region == NULL) {
      // we will get a new GC alloc region
      alloc_region = newAllocRegionWithExpansion(ap, 0);
    } else {
      // the region was retained from the last collection
      ++_gc_alloc_region_counts[ap];
      if (G1PrintHeapRegions) {
        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                               "top "PTR_FORMAT,
                               alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
      }
    }

    if (alloc_region != NULL) {
      assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
      // Installs the region and pushes it onto the GC alloc region list.
      set_gc_alloc_region(ap, alloc_region);
    }

    assert(_gc_alloc_regions[ap] == NULL ||
           _gc_alloc_regions[ap]->is_gc_alloc_region(),
           "the GC alloc region should be tagged as such");
    assert(_gc_alloc_regions[ap] == NULL ||
           _gc_alloc_regions[ap] == _gc_alloc_region_list,
           "the GC alloc region should be the same as the GC alloc list head");
  }
  // Set alternative regions for allocation purposes that have reached
  // their limit.
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
    if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
      _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
    }
  }
  assert(check_gc_alloc_regions(), "alloc regions messed up");
}
3656 | |
// Release the GC alloc regions at the end of a pause. Empty regions go
// back on the free list; non-empty regions whose purpose allows
// retention are kept in _retained_gc_alloc_regions for reuse by the
// next pause — unless 'totally' is set, in which case nothing at all
// is retained.
void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
  // We keep a separate list of all regions that have been alloc regions in
  // the current collection pause. Forget that now. This method will
  // untag the GC alloc regions and tear down the GC alloc region
  // list. It's desirable that no regions are tagged as GC alloc
  // outside GCs.
  forget_alloc_region_list();

  // The current alloc regions contain objs that have survived
  // collection. Make them no longer GC alloc regions.
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    _retained_gc_alloc_regions[ap] = NULL;
    _gc_alloc_region_counts[ap] = 0;

    if (r != NULL) {
      // we retain nothing on _gc_alloc_regions between GCs
      set_gc_alloc_region(ap, NULL);

      if (r->is_empty()) {
        // we didn't actually allocate anything in it; let's just put
        // it on the free list
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        r->set_zero_fill_complete();
        put_free_region_on_list_locked(r);
      } else if (_retain_gc_alloc_region[ap] && !totally) {
        // retain it so that we can use it at the beginning of the next GC
        _retained_gc_alloc_regions[ap] = r;
      }
    }
  }
}
3689 | |
3690 #ifndef PRODUCT | |
3691 // Useful for debugging | |
3692 | |
3693 void G1CollectedHeap::print_gc_alloc_regions() { | |
3694 gclog_or_tty->print_cr("GC alloc regions"); | |
3695 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3696 HeapRegion* r = _gc_alloc_regions[ap]; | |
3697 if (r == NULL) { | |
3698 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3699 } else { | |
3700 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3701 ap, r->bottom(), r->used()); | |
3702 } | |
3703 } | |
3704 } | |
3705 #endif // PRODUCT | |
342 | 3706 |
3707 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3708 _drain_in_progress = false; | |
3709 set_evac_failure_closure(cl); | |
3710 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3711 } | |
3712 | |
3713 void G1CollectedHeap::finalize_for_evac_failure() { | |
3714 assert(_evac_failure_scan_stack != NULL && | |
3715 _evac_failure_scan_stack->length() == 0, | |
3716 "Postcondition"); | |
3717 assert(!_drain_in_progress, "Postcondition"); | |
1045 | 3718 delete _evac_failure_scan_stack; |
342 | 3719 _evac_failure_scan_stack = NULL; |
3720 } | |
3721 | |
3722 | |
3723 | |
3724 // *** Sequential G1 Evacuation | |
3725 | |
3726 class G1IsAliveClosure: public BoolObjectClosure { | |
3727 G1CollectedHeap* _g1; | |
3728 public: | |
3729 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3730 void do_object(oop p) { assert(false, "Do not call."); } | |
3731 bool do_object_b(oop p) { | |
3732 // It is reachable if it is outside the collection set, or is inside | |
3733 // and forwarded. | |
3734 | |
3735 #ifdef G1_DEBUG | |
3736 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3737 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3738 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3739 #endif // G1_DEBUG | |
3740 | |
3741 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3742 } | |
3743 }; | |
3744 | |
3745 class G1KeepAliveClosure: public OopClosure { | |
3746 G1CollectedHeap* _g1; | |
3747 public: | |
3748 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3749 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3750 void do_oop( oop* p) { |
342 | 3751 oop obj = *p; |
3752 #ifdef G1_DEBUG | |
3753 if (PrintGC && Verbose) { | |
3754 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3755 p, (void*) obj, (void*) *p); | |
3756 } | |
3757 #endif // G1_DEBUG | |
3758 | |
3759 if (_g1->obj_in_cs(obj)) { | |
3760 assert( obj->is_forwarded(), "invariant" ); | |
3761 *p = obj->forwardee(); | |
3762 #ifdef G1_DEBUG | |
3763 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3764 (void*) obj, (void*) *p); | |
3765 #endif // G1_DEBUG | |
3766 } | |
3767 } | |
3768 }; | |
3769 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3770 class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3771 private: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3772 G1CollectedHeap* _g1; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3773 DirtyCardQueue *_dcq; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3774 CardTableModRefBS* _ct_bs; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3775 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3776 public: |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3777 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3778 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3779 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3780 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3781 virtual void do_oop( oop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3782 template <class T> void do_oop_work(T* p) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3783 assert(_from->is_in_reserved(p), "paranoia"); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3784 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
3785 !_from->is_survivor()) { |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3786 size_t card_index = _ct_bs->index_for(p); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3787 if (_ct_bs->mark_card_deferred(card_index)) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3788 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3789 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3790 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3791 } |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3792 }; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3793 |
342 | 3794 class RemoveSelfPointerClosure: public ObjectClosure { |
3795 private: | |
3796 G1CollectedHeap* _g1; | |
3797 ConcurrentMark* _cm; | |
3798 HeapRegion* _hr; | |
3799 size_t _prev_marked_bytes; | |
3800 size_t _next_marked_bytes; | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3801 OopsInHeapRegionClosure *_cl; |
342 | 3802 public: |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3803 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3804 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3805 _next_marked_bytes(0), _cl(cl) {} |
342 | 3806 |
3807 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3808 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3809 | |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3810 // The original idea here was to coalesce evacuated and dead objects. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3811 // However that caused complications with the block offset table (BOT). |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3812 // In particular if there were two TLABs, one of them partially refined. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3813 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3814 // The BOT entries of the unrefined part of TLAB_2 point to the start |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3815 // of TLAB_2. If the last object of the TLAB_1 and the first object |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3816 // of TLAB_2 are coalesced, then the cards of the unrefined part |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3817 // would point into middle of the filler object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3818 // |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3819 // The current approach is to not coalesce and leave the BOT contents intact. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3820 void do_object(oop obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3821 if (obj->is_forwarded() && obj->forwardee() == obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3822 // The object failed to move. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3823 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3824 _cm->markPrev(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3825 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3826 _prev_marked_bytes += (obj->size() * HeapWordSize); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3827 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3828 _cm->markAndGrayObjectIfNecessary(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3829 } |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3830 obj->set_mark(markOopDesc::prototype()); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3831 // While we were processing RSet buffers during the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3832 // collection, we actually didn't scan any cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3833 // collection set, since we didn't want to update remebered |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3834 // sets with entries that point into the collection set, given |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3835 // that live objects fromthe collection set are about to move |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3836 // and such entries will be stale very soon. This change also |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3837 // dealt with a reliability issue which involved scanning a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3838 // card in the collection set and coming across an array that |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3839 // was being chunked and looking malformed. The problem is |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3840 // that, if evacuation fails, we might have remembered set |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3841 // entries missing given that we skipped cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3842 // collection set. So, we'll recreate such entries now. |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3843 obj->oop_iterate(_cl); |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3844 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3845 } else { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3846 // The object has been either evacuated or is dead. Fill it with a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3847 // dummy object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
3848 MemRegion mr((HeapWord*)obj, obj->size()); |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
3849 CollectedHeap::fill_with_object(mr); |
342 | 3850 _cm->clearRangeBothMaps(mr); |
3851 } | |
3852 } | |
3853 }; | |
3854 | |
3855 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
1705 | 3856 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3857 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3858 UpdateRSetDeferred deferred_update(_g1h, &dcq); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3859 OopsInHeapRegionClosure *cl; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3860 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3861 cl = &deferred_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3862 } else { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3863 cl = &immediate_update; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3864 } |
342 | 3865 HeapRegion* cur = g1_policy()->collection_set(); |
3866 while (cur != NULL) { | |
3867 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3868 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3869 RemoveSelfPointerClosure rspc(_g1h, cl); |
342 | 3870 if (cur->evacuation_failed()) { |
3871 assert(cur->in_collection_set(), "bad CS"); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
3872 cl->set_region(cur); |
342 | 3873 cur->object_iterate(&rspc); |
3874 | |
3875 // A number of manipulations to make the TAMS be the current top, | |
3876 // and the marked bytes be the ones observed in the iteration. | |
3877 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
3878 // The comments below are the postconditions achieved by the | |
3879 // calls. Note especially the last such condition, which says that | |
3880 // the count of marked bytes has been properly restored. | |
3881 cur->note_start_of_marking(false); | |
3882 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3883 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
3884 // _next_marked_bytes == prev_marked_bytes. | |
3885 cur->note_end_of_marking(); | |
3886 // _prev_top_at_mark_start == top(), | |
3887 // _prev_marked_bytes == prev_marked_bytes | |
3888 } | |
3889 // If there is no mark in progress, we modified the _next variables | |
3890 // above needlessly, but harmlessly. | |
3891 if (_g1h->mark_in_progress()) { | |
3892 cur->note_start_of_marking(false); | |
3893 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3894 // _next_marked_bytes == next_marked_bytes. | |
3895 } | |
3896 | |
3897 // Now make sure the region has the right index in the sorted array. | |
3898 g1_policy()->note_change_in_marked_bytes(cur); | |
3899 } | |
3900 cur = cur->next_in_collection_set(); | |
3901 } | |
3902 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3903 | |
3904 // Now restore saved marks, if any. | |
3905 if (_objs_with_preserved_marks != NULL) { | |
3906 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3907 assert(_objs_with_preserved_marks->length() == | |
3908 _preserved_marks_of_objs->length(), "Both or none."); | |
3909 guarantee(_objs_with_preserved_marks->length() == | |
3910 _preserved_marks_of_objs->length(), "Both or none."); | |
3911 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3912 oop obj = _objs_with_preserved_marks->at(i); | |
3913 markOop m = _preserved_marks_of_objs->at(i); | |
3914 obj->set_mark(m); | |
3915 } | |
3916 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3917 delete _objs_with_preserved_marks; | |
3918 delete _preserved_marks_of_objs; | |
3919 _objs_with_preserved_marks = NULL; | |
3920 _preserved_marks_of_objs = NULL; | |
3921 } | |
3922 } | |
3923 | |
3924 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
3925 _evac_failure_scan_stack->push(obj); | |
3926 } | |
3927 | |
3928 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3929 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3930 | |
3931 while (_evac_failure_scan_stack->length() > 0) { | |
3932 oop obj = _evac_failure_scan_stack->pop(); | |
3933 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3934 obj->oop_iterate_backwards(_evac_failure_closure); | |
3935 } | |
3936 } | |
3937 | |
3938 void G1CollectedHeap::handle_evacuation_failure(oop old) { | |
3939 markOop m = old->mark(); | |
3940 // forward to self | |
3941 assert(!old->is_forwarded(), "precondition"); | |
3942 | |
3943 old->forward_to(old); | |
3944 handle_evacuation_failure_common(old, m); | |
3945 } | |
3946 | |
3947 oop | |
3948 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
3949 oop old) { | |
3950 markOop m = old->mark(); | |
3951 oop forward_ptr = old->forward_to_atomic(old); | |
3952 if (forward_ptr == NULL) { | |
3953 // Forward-to-self succeeded. | |
3954 if (_evac_failure_closure != cl) { | |
3955 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
3956 assert(!_drain_in_progress, | |
3957 "Should only be true while someone holds the lock."); | |
3958 // Set the global evac-failure closure to the current thread's. | |
3959 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
3960 set_evac_failure_closure(cl); | |
3961 // Now do the common part. | |
3962 handle_evacuation_failure_common(old, m); | |
3963 // Reset to NULL. | |
3964 set_evac_failure_closure(NULL); | |
3965 } else { | |
3966 // The lock is already held, and this is recursive. | |
3967 assert(_drain_in_progress, "This should only be the recursive case."); | |
3968 handle_evacuation_failure_common(old, m); | |
3969 } | |
3970 return old; | |
3971 } else { | |
3972 // Someone else had a place to copy it. | |
3973 return forward_ptr; | |
3974 } | |
3975 } | |
3976 | |
3977 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
3978 set_evacuation_failed(true); | |
3979 | |
3980 preserve_mark_if_necessary(old, m); | |
3981 | |
3982 HeapRegion* r = heap_region_containing(old); | |
3983 if (!r->evacuation_failed()) { | |
3984 r->set_evacuation_failed(true); | |
1282 | 3985 if (G1PrintHeapRegions) { |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
3986 gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " |
342 | 3987 "["PTR_FORMAT","PTR_FORMAT")\n", |
3988 r, r->bottom(), r->end()); | |
3989 } | |
3990 } | |
3991 | |
3992 push_on_evac_failure_scan_stack(old); | |
3993 | |
3994 if (!_drain_in_progress) { | |
3995 // prevent recursion in copy_to_survivor_space() | |
3996 _drain_in_progress = true; | |
3997 drain_evac_failure_scan_stack(); | |
3998 _drain_in_progress = false; | |
3999 } | |
4000 } | |
4001 | |
4002 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
4003 if (m != markOopDesc::prototype()) { | |
4004 if (_objs_with_preserved_marks == NULL) { | |
4005 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
4006 _objs_with_preserved_marks = | |
4007 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
4008 _preserved_marks_of_objs = | |
4009 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
4010 } | |
4011 _objs_with_preserved_marks->push(obj); | |
4012 _preserved_marks_of_objs->push(m); | |
4013 } | |
4014 } | |
4015 | |
4016 // *** Parallel G1 Evacuation | |
4017 | |
4018 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
4019 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4020 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4021 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4022 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4023 |
342 | 4024 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
4025 // let the caller handle alloc failure | |
4026 if (alloc_region == NULL) return NULL; | |
4027 | |
4028 HeapWord* block = alloc_region->par_allocate(word_size); | |
4029 if (block == NULL) { | |
4030 MutexLockerEx x(par_alloc_during_gc_lock(), | |
4031 Mutex::_no_safepoint_check_flag); | |
4032 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
4033 } | |
4034 return block; | |
4035 } | |
4036 | |
545 | 4037 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
4038 bool par) { | |
4039 // Another thread might have obtained alloc_region for the given | |
4040 // purpose, and might be attempting to allocate in it, and might | |
4041 // succeed. Therefore, we can't do the "finalization" stuff on the | |
4042 // region below until we're sure the last allocation has happened. | |
4043 // We ensure this by allocating the remaining space with a garbage | |
4044 // object. | |
4045 if (par) par_allocate_remaining_space(alloc_region); | |
4046 // Now we can do the post-GC stuff on the region. | |
4047 alloc_region->note_end_of_copying(); | |
4048 g1_policy()->record_after_bytes(alloc_region->used()); | |
4049 } | |
4050 | |
342 | 4051 HeapWord* |
4052 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
4053 HeapRegion* alloc_region, | |
4054 bool par, | |
4055 size_t word_size) { | |
1718
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4056 assert(!isHumongous(word_size), |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4057 err_msg("we should not be seeing humongous allocation requests " |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4058 "during GC, word_size = "SIZE_FORMAT, word_size)); |
bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
1717
diff
changeset
|
4059 |
342 | 4060 HeapWord* block = NULL; |
4061 // In the parallel case, a previous thread to obtain the lock may have | |
4062 // already assigned a new gc_alloc_region. | |
4063 if (alloc_region != _gc_alloc_regions[purpose]) { | |
4064 assert(par, "But should only happen in parallel case."); | |
4065 alloc_region = _gc_alloc_regions[purpose]; | |
4066 if (alloc_region == NULL) return NULL; | |
4067 block = alloc_region->par_allocate(word_size); | |
4068 if (block != NULL) return block; | |
4069 // Otherwise, continue; this new region is empty, too. | |
4070 } | |
4071 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 4072 retire_alloc_region(alloc_region, par); |
342 | 4073 |
4074 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
4075 // Cannot allocate more regions for the given purpose. | |
4076 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
4077 // Is there an alternative? | |
4078 if (purpose != alt_purpose) { | |
4079 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
4080 // Has not the alternative region been aliased? | |
545 | 4081 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 4082 // Try to allocate in the alternative region. |
4083 if (par) { | |
4084 block = alt_region->par_allocate(word_size); | |
4085 } else { | |
4086 block = alt_region->allocate(word_size); | |
4087 } | |
4088 // Make an alias. | |
4089 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 4090 if (block != NULL) { |
4091 return block; | |
4092 } | |
4093 retire_alloc_region(alt_region, par); | |
342 | 4094 } |
4095 // Both the allocation region and the alternative one are full | |
4096 // and aliased, replace them with a new allocation region. | |
4097 purpose = alt_purpose; | |
4098 } else { | |
4099 set_gc_alloc_region(purpose, NULL); | |
4100 return NULL; | |
4101 } | |
4102 } | |
4103 | |
4104 // Now allocate a new region for allocation. | |
4105 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); | |
4106 | |
4107 // let the caller handle alloc failure | |
4108 if (alloc_region != NULL) { | |
4109 | |
4110 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
4111 assert(alloc_region->saved_mark_at_top(), | |
4112 "Mark should have been saved already."); | |
4113 // We used to assert that the region was zero-filled here, but no | |
4114 // longer. | |
4115 | |
4116 // This must be done last: once it's installed, other regions may | |
4117 // allocate in it (without holding the lock.) | |
4118 set_gc_alloc_region(purpose, alloc_region); | |
4119 | |
4120 if (par) { | |
4121 block = alloc_region->par_allocate(word_size); | |
4122 } else { | |
4123 block = alloc_region->allocate(word_size); | |
4124 } | |
4125 // Caller handles alloc failure. | |
4126 } else { | |
4127 // This sets other apis using the same old alloc region to NULL, also. | |
4128 set_gc_alloc_region(purpose, NULL); | |
4129 } | |
4130 return block; // May be NULL. | |
4131 } | |
4132 | |
4133 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
4134 HeapWord* block = NULL; | |
4135 size_t free_words; | |
4136 do { | |
4137 free_words = r->free()/HeapWordSize; | |
4138 // If there's too little space, no one can allocate, so we're done. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1547
diff
changeset
|
4139 if (free_words < CollectedHeap::min_fill_size()) return; |
342 | 4140 // Otherwise, try to claim it. |
4141 block = r->par_allocate(free_words); | |
4142 } while (block == NULL); | |
481
7d7a7c599c17
6578152: fill_region_with_object has usability and safety issues
jcoomes
parents:
457
diff
changeset
|
4143 fill_with_object(block, free_words); |
342 | 4144 } |
4145 | |
4146 #ifndef PRODUCT | |
4147 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
4148 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
4149 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
4150 return true; | |
4151 } | |
4152 #endif // PRODUCT | |
4153 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4154 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4155 : _g1h(g1h), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4156 _refs(g1h->task_queue(queue_num)), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4157 _dcq(&g1h->dirty_card_queue_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4158 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4159 _g1_rem(g1h->g1_rem_set()), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4160 _hash_seed(17), _queue_num(queue_num), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4161 _term_attempts(0), |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4162 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4163 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4164 _age_table(false), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4165 _strong_roots_time(0), _term_time(0), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4166 _alloc_buffer_waste(0), _undo_waste(0) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4167 { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4168 // we allocate G1YoungSurvRateNumRegions plus one entries, since |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4169 // we "sacrifice" entry 0 to keep track of surviving bytes for |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4170 // non-young regions (where the age is -1) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4171 // We also add a few elements at the beginning and at the end in |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4172 // an attempt to eliminate cache contention |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4173 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4174 size_t array_length = PADDING_ELEM_NUM + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4175 real_length + |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4176 PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4177 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4178 if (_surviving_young_words_base == NULL) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4179 vm_exit_out_of_memory(array_length * sizeof(size_t), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4180 "Not enough space for young surv histo."); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4181 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4182 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4183 |
1391
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4184 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4185 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
79e419e5ea3b
6942253: G1: replace G1ParallelGCAllocBufferSize with YoungPLABSize and OldPLABSize
apetrusenko
parents:
1390
diff
changeset
|
4186 |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4187 _start = os::elapsedTime(); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4188 } |
342 | 4189 |
1709 | 4190 void |
4191 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) | |
4192 { | |
4193 st->print_raw_cr("GC Termination Stats"); | |
4194 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" | |
4195 " ------waste (KiB)------"); | |
4196 st->print_raw_cr("thr ms ms % ms % attempts" | |
4197 " total alloc undo"); | |
4198 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" | |
4199 " ------- ------- -------"); | |
4200 } | |
4201 | |
4202 void | |
4203 G1ParScanThreadState::print_termination_stats(int i, | |
4204 outputStream* const st) const | |
4205 { | |
4206 const double elapsed_ms = elapsed_time() * 1000.0; | |
4207 const double s_roots_ms = strong_roots_time() * 1000.0; | |
4208 const double term_ms = term_time() * 1000.0; | |
4209 st->print_cr("%3d %9.2f %9.2f %6.2f " | |
4210 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " | |
4211 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), | |
4212 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, | |
4213 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), | |
4214 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, | |
4215 alloc_buffer_waste() * HeapWordSize / K, | |
4216 undo_waste() * HeapWordSize / K); | |
4217 } | |
4218 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4219 #ifdef ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4220 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4221 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4222 assert(UseCompressedOops, "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4223 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4224 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4225 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4226 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4227 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4228 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4229 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4230 bool G1ParScanThreadState::verify_ref(oop* ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4231 assert(ref != NULL, "invariant"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4232 if (has_partial_array_mask(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4233 // Must be in the collection set--it's already been copied. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4234 oop p = clear_partial_array_mask(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4235 assert(_g1h->obj_in_cs(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4236 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4237 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4238 oop p = oopDesc::load_decode_heap_oop(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4239 assert(_g1h->is_in_g1_reserved(p), |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4240 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4241 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4242 return true; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4243 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4244 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4245 bool G1ParScanThreadState::verify_task(StarTask ref) const { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4246 if (ref.is_narrow()) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4247 return verify_ref((narrowOop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4248 } else { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4249 return verify_ref((oop*) ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4250 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4251 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4252 #endif // ASSERT |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4253 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4254 void G1ParScanThreadState::trim_queue() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4255 StarTask ref; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4256 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4257 // Drain the overflow stack first, so other threads can steal. |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4258 while (refs()->pop_overflow(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4259 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4260 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4261 while (refs()->pop_local(ref)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4262 deal_with_reference(ref); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4263 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4264 } while (!refs()->is_empty()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4265 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4266 |
342 | 4267 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
4268 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
4269 _par_scan_state(par_scan_state) { } | |
4270 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4271 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
342 | 4272 // This is called _after_ do_oop_work has been called, hence after |
4273 // the object has been relocated to its new location and *p points | |
4274 // to its new location. | |
4275 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4276 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4277 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4278 oop obj = oopDesc::decode_heap_oop(heap_oop); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4279 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), |
342 | 4280 "shouldn't still be in the CSet if evacuation didn't fail."); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4281 HeapWord* addr = (HeapWord*)obj; |
342 | 4282 if (_g1->is_in_g1_reserved(addr)) |
4283 _cm->grayRoot(oop(addr)); | |
4284 } | |
4285 } | |
4286 | |
4287 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
4288 size_t word_sz = old->size(); | |
4289 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
4290 // +1 to make the -1 indexes valid... | |
4291 int young_index = from_region->young_index_in_cset()+1; | |
4292 assert( (from_region->is_young() && young_index > 0) || | |
4293 (!from_region->is_young() && young_index == 0), "invariant" ); | |
4294 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
4295 markOop m = old->mark(); | |
545 | 4296 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
4297 : m->age(); | |
4298 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 4299 word_sz); |
4300 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
4301 oop obj = oop(obj_ptr); | |
4302 | |
4303 if (obj_ptr == NULL) { | |
4304 // This will either forward-to-self, or detect that someone else has | |
4305 // installed a forwarding pointer. | |
4306 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4307 return _g1->handle_evacuation_failure_par(cl, old); | |
4308 } | |
4309 | |
526 | 4310 // We're going to allocate linearly, so might as well prefetch ahead. |
4311 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
4312 | |
342 | 4313 oop forward_ptr = old->forward_to_atomic(obj); |
4314 if (forward_ptr == NULL) { | |
4315 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 4316 if (g1p->track_object_age(alloc_purpose)) { |
4317 // We could simply do obj->incr_age(). However, this causes a | |
4318 // performance issue. obj->incr_age() will first check whether | |
4319 // the object has a displaced mark by checking its mark word; | |
4320 // getting the mark word from the new location of the object | |
4321 // stalls. So, given that we already have the mark word and we | |
4322 // are about to install it anyway, it's better to increase the | |
4323 // age on the mark word, when the object does not have a | |
4324 // displaced mark word. We're not expecting many objects to have | |
4325 // a displaced marked word, so that case is not optimized | |
4326 // further (it could be...) and we simply call obj->incr_age(). | |
4327 | |
4328 if (m->has_displaced_mark_helper()) { | |
4329 // in this case, we have to install the mark word first, | |
4330 // otherwise obj looks to be forwarded (the old mark word, | |
4331 // which contains the forward pointer, was copied) | |
4332 obj->set_mark(m); | |
4333 obj->incr_age(); | |
4334 } else { | |
4335 m = m->incr_age(); | |
545 | 4336 obj->set_mark(m); |
526 | 4337 } |
545 | 4338 _par_scan_state->age_table()->add(obj, word_sz); |
4339 } else { | |
4340 obj->set_mark(m); | |
526 | 4341 } |
4342 | |
342 | 4343 // preserve "next" mark bit |
4344 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
4345 if (!use_local_bitmaps || | |
4346 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
4347 // if we couldn't mark it on the local bitmap (this happens when | |
4348 // the object was not allocated in the GCLab), we have to bite | |
4349 // the bullet and do the standard parallel mark | |
4350 _cm->markAndGrayObjectIfNecessary(obj); | |
4351 } | |
4352 #if 1 | |
4353 if (_g1->isMarkedNext(old)) { | |
4354 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
4355 } | |
4356 #endif | |
4357 } | |
4358 | |
4359 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
4360 surv_young_words[young_index] += word_sz; | |
4361 | |
4362 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
4363 arrayOop(old)->set_length(0); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4364 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4365 _par_scan_state->push_on_queue(old_p); |
342 | 4366 } else { |
526 | 4367 // No point in using the slower heap_region_containing() method, |
4368 // given that we know obj is in the heap. | |
4369 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 4370 obj->oop_iterate_backwards(_scanner); |
4371 } | |
4372 } else { | |
4373 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
4374 obj = forward_ptr; | |
4375 } | |
4376 return obj; | |
4377 } | |
4378 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4379 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4380 template <class T> |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4381 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4382 ::do_oop_work(T* p) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4383 oop obj = oopDesc::load_decode_heap_oop(p); |
342 | 4384 assert(barrier != G1BarrierRS || obj != NULL, |
4385 "Precondition: G1BarrierRS implies obj is nonNull"); | |
4386 | |
526 | 4387 // here the null check is implicit in the cset_fast_test() test |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4388 if (_g1->in_cset_fast_test(obj)) { |
342 | 4389 #if G1_REM_SET_LOGGING |
526 | 4390 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
4391 "into CS.", p, (void*) obj); | |
342 | 4392 #endif |
526 | 4393 if (obj->is_forwarded()) { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4394 oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
526 | 4395 } else { |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4396 oop copy_oop = copy_to_survivor_space(obj); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4397 oopDesc::encode_store_heap_oop(p, copy_oop); |
342 | 4398 } |
526 | 4399 // When scanning the RS, we only care about objs in CS. |
4400 if (barrier == G1BarrierRS) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4401 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 4402 } |
526 | 4403 } |
4404 | |
4405 if (barrier == G1BarrierEvac && obj != NULL) { | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4406 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 4407 } |
4408 | |
4409 if (do_gen_barrier && obj != NULL) { | |
4410 par_do_barrier(p); | |
4411 } | |
4412 } | |
4413 | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4414 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4415 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4416 |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4417 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
526 | 4418 assert(has_partial_array_mask(p), "invariant"); |
4419 oop old = clear_partial_array_mask(p); | |
342 | 4420 assert(old->is_objArray(), "must be obj array"); |
4421 assert(old->is_forwarded(), "must be forwarded"); | |
4422 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
4423 | |
4424 objArrayOop obj = objArrayOop(old->forwardee()); | |
4425 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
4426 // Process ParGCArrayScanChunk elements now | |
4427 // and push the remainder back onto queue | |
4428 int start = arrayOop(old)->length(); | |
4429 int end = obj->length(); | |
4430 int remainder = end - start; | |
4431 assert(start <= end, "just checking"); | |
4432 if (remainder > 2 * ParGCArrayScanChunk) { | |
4433 // Test above combines last partial chunk with a full chunk | |
4434 end = start + ParGCArrayScanChunk; | |
4435 arrayOop(old)->set_length(end); | |
4436 // Push remainder. | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4437 oop* old_p = set_partial_array_mask(old); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4438 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4439 _par_scan_state->push_on_queue(old_p); |
342 | 4440 } else { |
4441 // Restore length so that the heap remains parsable in | |
4442 // case of evacuation failure. | |
4443 arrayOop(old)->set_length(end); | |
4444 } | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4445 _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
342 | 4446 // process our set of indices (include header in first chunk) |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4447 obj->oop_iterate_range(&_scanner, start, end); |
342 | 4448 } |
4449 | |
4450 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
4451 protected: | |
4452 G1CollectedHeap* _g1h; | |
4453 G1ParScanThreadState* _par_scan_state; | |
4454 RefToScanQueueSet* _queues; | |
4455 ParallelTaskTerminator* _terminator; | |
4456 | |
4457 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
4458 RefToScanQueueSet* queues() { return _queues; } | |
4459 ParallelTaskTerminator* terminator() { return _terminator; } | |
4460 | |
4461 public: | |
4462 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
4463 G1ParScanThreadState* par_scan_state, | |
4464 RefToScanQueueSet* queues, | |
4465 ParallelTaskTerminator* terminator) | |
4466 : _g1h(g1h), _par_scan_state(par_scan_state), | |
4467 _queues(queues), _terminator(terminator) {} | |
4468 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4469 void do_void(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4470 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4471 private: |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4472 inline bool offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4473 }; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4474 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4475 bool G1ParEvacuateFollowersClosure::offer_termination() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4476 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4477 pss->start_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4478 const bool res = terminator()->offer_termination(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4479 pss->end_term_time(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4480 return res; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4481 } |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4482 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4483 void G1ParEvacuateFollowersClosure::do_void() { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4484 StarTask stolen_task; |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4485 G1ParScanThreadState* const pss = par_scan_state(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4486 pss->trim_queue(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4487 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4488 do { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4489 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4490 assert(pss->verify_task(stolen_task), "sanity"); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4491 if (stolen_task.is_narrow()) { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4492 pss->deal_with_reference((narrowOop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4493 } else { |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4494 pss->deal_with_reference((oop*) stolen_task); |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4495 } |
1883
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4496 |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4497 // We've just processed a reference and we might have made |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4498 // available new entries on the queues. So we have to make sure |
35e4e086d5f5
6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly
tonyp
parents:
1862
diff
changeset
|
4499 // we drain the queues as necessary. |
342 | 4500 pss->trim_queue(); |
4501 } | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4502 } while (!offer_termination()); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4503 |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4504 pss->retire_alloc_buffers(); |
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4505 } |
342 | 4506 |
4507 class G1ParTask : public AbstractGangTask { | |
4508 protected: | |
4509 G1CollectedHeap* _g1h; | |
4510 RefToScanQueueSet *_queues; | |
4511 ParallelTaskTerminator _terminator; | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4512 int _n_workers; |
342 | 4513 |
4514 Mutex _stats_lock; | |
4515 Mutex* stats_lock() { return &_stats_lock; } | |
4516 | |
4517 size_t getNCards() { | |
4518 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
4519 / G1BlockOffsetSharedArray::N_bytes; | |
4520 } | |
4521 | |
4522 public: | |
4523 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
4524 : AbstractGangTask("G1 collection"), | |
4525 _g1h(g1h), | |
4526 _queues(task_queues), | |
4527 _terminator(workers, _queues), | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4528 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4529 _n_workers(workers) |
342 | 4530 {} |
4531 | |
4532 RefToScanQueueSet* queues() { return _queues; } | |
4533 | |
4534 RefToScanQueue *work_queue(int i) { | |
4535 return queues()->queue(i); | |
4536 } | |
4537 | |
4538 void work(int i) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
4539 if (i >= _n_workers) return; // no work needed this round |
1611 | 4540 |
4541 double start_time_ms = os::elapsedTime() * 1000.0; | |
4542 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); | |
4543 | |
342 | 4544 ResourceMark rm; |
4545 HandleMark hm; | |
4546 | |
526 | 4547 G1ParScanThreadState pss(_g1h, i); |
4548 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
4549 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
4550 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 4551 |
4552 pss.set_evac_closure(&scan_evac_cl); | |
4553 pss.set_evac_failure_closure(&evac_failure_cl); | |
4554 pss.set_partial_scan_closure(&partial_scan_cl); | |
4555 | |
4556 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
4557 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
4558 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4559 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4560 |
342 | 4561 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
4562 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
4563 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
4564 | |
4565 OopsInHeapRegionClosure *scan_root_cl; | |
4566 OopsInHeapRegionClosure *scan_perm_cl; | |
4567 | |
1359
23b1b27ac76c
6909756: G1: guarantee(G1CollectedHeap::heap()->mark_in_progress(),"Precondition.")
tonyp
parents:
1313
diff
changeset
|
4568 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
342 | 4569 scan_root_cl = &scan_mark_root_cl; |
4570 scan_perm_cl = &scan_mark_perm_cl; | |
4571 } else { | |
4572 scan_root_cl = &only_scan_root_cl; | |
4573 scan_perm_cl = &only_scan_perm_cl; | |
4574 } | |
4575 | |
4576 pss.start_strong_roots(); | |
4577 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4578 SharedHeap::SO_AllClasses, | |
4579 scan_root_cl, | |
1261
0414c1049f15
6923991: G1: improve scalability of RSet scanning
iveresov
parents:
1245
diff
changeset
|
4580 &push_heap_rs_cl, |
342 | 4581 scan_perm_cl, |
4582 i); | |
4583 pss.end_strong_roots(); | |
4584 { | |
4585 double start = os::elapsedTime(); | |
4586 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4587 evac.do_void(); | |
4588 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4589 double term_ms = pss.term_time()*1000.0; | |
4590 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
1611 | 4591 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); |
342 | 4592 } |
1282 | 4593 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
342 | 4594 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
4595 | |
4596 // Clean up any par-expanded rem sets. | |
4597 HeapRegionRemSet::par_cleanup(); | |
4598 | |
4599 if (ParallelGCVerbose) { | |
1709 | 4600 MutexLocker x(stats_lock()); |
4601 pss.print_termination_stats(i); | |
342 | 4602 } |
4603 | |
1862
b14ec34b1e07
6989448: G1: refactor and simplify G1ParScanThreadState
jcoomes
parents:
1861
diff
changeset
|
4604 assert(pss.refs()->is_empty(), "should be empty"); |
1611 | 4605 double end_time_ms = os::elapsedTime() * 1000.0; |
4606 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); | |
342 | 4607 } |
4608 }; | |
4609 | |
4610 // *** Common G1 Evacuation Stuff | |
4611 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4612 // This method is run in a GC worker. |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4613 |
342 | 4614 void |
4615 G1CollectedHeap:: | |
4616 g1_process_strong_roots(bool collecting_perm_gen, | |
4617 SharedHeap::ScanningOption so, | |
4618 OopClosure* scan_non_heap_roots, | |
4619 OopsInHeapRegionClosure* scan_rs, | |
4620 OopsInGenClosure* scan_perm, | |
4621 int worker_i) { | |
4622 // First scan the strong roots, including the perm gen. | |
4623 double ext_roots_start = os::elapsedTime(); | |
4624 double closure_app_time_sec = 0.0; | |
4625 | |
4626 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4627 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4628 buf_scan_perm.set_generation(perm_gen()); | |
4629 | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4630 // Walk the code cache w/o buffering, because StarTask cannot handle |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4631 // unaligned oop locations. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4632 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4633 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4634 process_strong_roots(false, // no scoping; this is parallel code |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4635 collecting_perm_gen, so, |
342 | 4636 &buf_scan_non_heap_roots, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4637 &eager_scan_code_roots, |
342 | 4638 &buf_scan_perm); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4639 |
342 | 4640 // Finish up any enqueued closure apps. |
4641 buf_scan_non_heap_roots.done(); | |
4642 buf_scan_perm.done(); | |
4643 double ext_roots_end = os::elapsedTime(); | |
4644 g1_policy()->reset_obj_copy_time(worker_i); | |
4645 double obj_copy_time_sec = | |
4646 buf_scan_non_heap_roots.closure_app_seconds() + | |
4647 buf_scan_perm.closure_app_seconds(); | |
4648 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4649 double ext_root_time_ms = | |
4650 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4651 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4652 | |
4653 // Scan strong roots in mark stack. | |
4654 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4655 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4656 } | |
4657 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4658 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4659 | |
4660 // XXX What should this be doing in the parallel case? | |
4661 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4662 // Now scan the complement of the collection set. | |
4663 if (scan_rs != NULL) { | |
4664 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4665 } | |
4666 // Finish with the ref_processor roots. | |
4667 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
4668 ref_processor()->oops_do(scan_non_heap_roots); | |
4669 } | |
4670 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4671 _process_strong_tasks->all_tasks_completed(); | |
4672 } | |
4673 | |
4674 void | |
4675 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4676 OopClosure* non_root_closure) { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4677 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4678 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
342 | 4679 } |
4680 | |
4681 | |
4682 class SaveMarksClosure: public HeapRegionClosure { | |
4683 public: | |
4684 bool doHeapRegion(HeapRegion* r) { | |
4685 r->save_marks(); | |
4686 return false; | |
4687 } | |
4688 }; | |
4689 | |
4690 void G1CollectedHeap::save_marks() { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4691 if (!CollectedHeap::use_parallel_gc_threads()) { |
342 | 4692 SaveMarksClosure sm; |
4693 heap_region_iterate(&sm); | |
4694 } | |
4695 // We do this even in the parallel case | |
4696 perm_gen()->save_marks(); | |
4697 } | |
4698 | |
4699 void G1CollectedHeap::evacuate_collection_set() { | |
4700 set_evacuation_failed(false); | |
4701 | |
4702 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4703 concurrent_g1_refine()->set_use_cache(false); | |
889 | 4704 concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
4705 | |
342 | 4706 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
4707 set_par_threads(n_workers); | |
4708 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4709 | |
4710 init_for_evac_failure(NULL); | |
4711 | |
4712 rem_set()->prepare_for_younger_refs_iterate(true); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4713 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4714 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
342 | 4715 double start_par = os::elapsedTime(); |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1755
diff
changeset
|
4716 if (G1CollectedHeap::use_parallel_gc_threads()) { |
342 | 4717 // The individual threads will set their evac-failure closures. |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4718 StrongRootsScope srs(this); |
1709 | 4719 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); |
342 | 4720 workers()->run_task(&g1_par_task); |
4721 } else { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
890
diff
changeset
|
4722 StrongRootsScope srs(this); |
342 | 4723 g1_par_task.work(0); |
4724 } | |
4725 | |
4726 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4727 g1_policy()->record_par_time(par_time); | |
4728 set_par_threads(0); | |
4729 // Is this the right thing to do here? We don't save marks | |
4730 // on individual heap regions when we allocate from | |
4731 // them in parallel, so this seems like the correct place for this. | |
545 | 4732 retire_all_alloc_regions(); |
342 | 4733 { |
4734 G1IsAliveClosure is_alive(this); | |
4735 G1KeepAliveClosure keep_alive(this); | |
4736 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4737 } | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4738 release_gc_alloc_regions(false /* totally */); |
342 | 4739 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4740 |
889 | 4741 concurrent_g1_refine()->clear_hot_cache(); |
342 | 4742 concurrent_g1_refine()->set_use_cache(true); |
4743 | |
4744 finalize_for_evac_failure(); | |
4745 | |
4746 // Must do this before removing self-forwarding pointers, which clears | |
4747 // the per-region evac-failure flags. | |
4748 concurrent_mark()->complete_marking_in_collection_set(); | |
4749 | |
4750 if (evacuation_failed()) { | |
4751 remove_self_forwarding_pointers(); | |
4752 if (PrintGCDetails) { | |
1719
b63010841f78
6975964: G1: print out a more descriptive message for evacuation failure when +PrintGCDetails is set
tonyp
parents:
1718
diff
changeset
|
4753 gclog_or_tty->print(" (to-space overflow)"); |
342 | 4754 } else if (PrintGC) { |
4755 gclog_or_tty->print("--"); | |
4756 } | |
4757 } | |
4758 | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4759 if (G1DeferredRSUpdate) { |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4760 RedirtyLoggedCardTableEntryFastClosure redirty; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4761 dirty_card_queue_set().set_closure(&redirty); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4762 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
1111 | 4763 |
4764 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); | |
4765 dcq.merge_bufferlists(&dirty_card_queue_set()); | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4766 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
595
diff
changeset
|
4767 } |
342 | 4768 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
4769 } | |
4770 | |
4771 void G1CollectedHeap::free_region(HeapRegion* hr) { | |
4772 size_t pre_used = 0; | |
4773 size_t cleared_h_regions = 0; | |
4774 size_t freed_regions = 0; | |
4775 UncleanRegionList local_list; | |
4776 | |
4777 HeapWord* start = hr->bottom(); | |
4778 HeapWord* end = hr->prev_top_at_mark_start(); | |
4779 size_t used_bytes = hr->used(); | |
4780 size_t live_bytes = hr->max_live_bytes(); | |
4781 if (used_bytes > 0) { | |
4782 guarantee( live_bytes <= used_bytes, "invariant" ); | |
4783 } else { | |
4784 guarantee( live_bytes == 0, "invariant" ); | |
4785 } | |
4786 | |
4787 size_t garbage_bytes = used_bytes - live_bytes; | |
4788 if (garbage_bytes > 0) | |
4789 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); | |
4790 | |
4791 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, | |
4792 &local_list); | |
4793 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
4794 &local_list); | |
4795 } | |
4796 | |
4797 void | |
4798 G1CollectedHeap::free_region_work(HeapRegion* hr, | |
4799 size_t& pre_used, | |
4800 size_t& cleared_h_regions, | |
4801 size_t& freed_regions, | |
4802 UncleanRegionList* list, | |
4803 bool par) { | |
4804 pre_used += hr->used(); | |
4805 if (hr->isHumongous()) { | |
4806 assert(hr->startsHumongous(), | |
4807 "Only the start of a humongous region should be freed."); | |
4808 int ind = _hrs->find(hr); | |
4809 assert(ind != -1, "Should have an index."); | |
4810 // Clear the start region. | |
4811 hr->hr_clear(par, true /*clear_space*/); | |
4812 list->insert_before_head(hr); | |
4813 cleared_h_regions++; | |
4814 freed_regions++; | |
4815 // Clear any continued regions. | |
4816 ind++; | |
4817 while ((size_t)ind < n_regions()) { | |
4818 HeapRegion* hrc = _hrs->at(ind); | |
4819 if (!hrc->continuesHumongous()) break; | |
4820 // Otherwise, does continue the H region. | |
4821 assert(hrc->humongous_start_region() == hr, "Huh?"); | |
4822 hrc->hr_clear(par, true /*clear_space*/); | |
4823 cleared_h_regions++; | |
4824 freed_regions++; | |
4825 list->insert_before_head(hrc); | |
4826 ind++; | |
4827 } | |
4828 } else { | |
4829 hr->hr_clear(par, true /*clear_space*/); | |
4830 list->insert_before_head(hr); | |
4831 freed_regions++; | |
4832 // If we're using clear2, this should not be enabled. | |
4833 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); | |
4834 } | |
4835 } | |
4836 | |
4837 void G1CollectedHeap::finish_free_region_work(size_t pre_used, | |
4838 size_t cleared_h_regions, | |
4839 size_t freed_regions, | |
4840 UncleanRegionList* list) { | |
4841 if (list != NULL && list->sz() > 0) { | |
4842 prepend_region_list_on_unclean_list(list); | |
4843 } | |
4844 // Acquire a lock, if we're parallel, to update possibly-shared | |
4845 // variables. | |
4846 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; | |
4847 { | |
4848 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
4849 _summary_bytes_used -= pre_used; | |
4850 _num_humongous_regions -= (int) cleared_h_regions; | |
4851 _free_regions += freed_regions; | |
4852 } | |
4853 } | |
4854 | |
4855 | |
4856 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4857 while (list != NULL) { | |
4858 guarantee( list->is_young(), "invariant" ); | |
4859 | |
4860 HeapWord* bottom = list->bottom(); | |
4861 HeapWord* end = list->end(); | |
4862 MemRegion mr(bottom, end); | |
4863 ct_bs->dirty(mr); | |
4864 | |
4865 list = list->get_next_young_region(); | |
4866 } | |
4867 } | |
4868 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4869 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4870 class G1ParCleanupCTTask : public AbstractGangTask { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4871 CardTableModRefBS* _ct_bs; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4872 G1CollectedHeap* _g1h; |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4873 HeapRegion* volatile _su_head; |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4874 public: |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4875 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4876 G1CollectedHeap* g1h, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4877 HeapRegion* survivor_list) : |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4878 AbstractGangTask("G1 Par Cleanup CT Task"), |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4879 _ct_bs(ct_bs), |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4880 _g1h(g1h), |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4881 _su_head(survivor_list) |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4882 { } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4883 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4884 void work(int i) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4885 HeapRegion* r; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4886 while (r = _g1h->pop_dirty_cards_region()) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4887 clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4888 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4889 // Redirty the cards of the survivor regions. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4890 dirty_list(&this->_su_head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4891 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4892 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4893 void clear_cards(HeapRegion* r) { |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4894 // Cards for Survivor regions will be dirtied later. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4895 if (!r->is_survivor()) { |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4896 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4897 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4898 } |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4899 |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4900 void dirty_list(HeapRegion* volatile * head_ptr) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4901 HeapRegion* head; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4902 do { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4903 // Pop region off the list. |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4904 head = *head_ptr; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4905 if (head != NULL) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4906 HeapRegion* r = (HeapRegion*) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4907 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4908 if (r == head) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4909 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4910 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4911 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4912 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4913 } while (*head_ptr != NULL); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4914 } |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4915 }; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4916 |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4917 |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4918 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4919 class G1VerifyCardTableCleanup: public HeapRegionClosure { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4920 CardTableModRefBS* _ct_bs; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4921 public: |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4922 G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4923 : _ct_bs(ct_bs) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4924 { } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4925 virtual bool doHeapRegion(HeapRegion* r) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4926 { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4927 MemRegion mr(r->bottom(), r->end()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4928 if (r->is_survivor()) { |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4929 _ct_bs->verify_dirty_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4930 } else { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4931 _ct_bs->verify_clean_region(mr); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4932 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4933 return false; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4934 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4935 }; |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4936 #endif |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4937 |
342 | 4938 void G1CollectedHeap::cleanUpCardTable() { |
4939 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
4940 double start = os::elapsedTime(); | |
4941 | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4942 // Iterate over the dirty cards region list. |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4943 G1ParCleanupCTTask cleanup_task(ct_bs, this, |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4944 _young_list->first_survivor_region()); |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4945 |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4946 if (ParallelGCThreads > 0) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4947 set_par_threads(workers()->total_workers()); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4948 workers()->run_task(&cleanup_task); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4949 set_par_threads(0); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4950 } else { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4951 while (_dirty_cards_region_list) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4952 HeapRegion* r = _dirty_cards_region_list; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4953 cleanup_task.clear_cards(r); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4954 _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4955 if (_dirty_cards_region_list == r) { |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4956 // The last region. |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4957 _dirty_cards_region_list = NULL; |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4958 } |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4959 r->set_next_dirty_cards_region(NULL); |
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
794
diff
changeset
|
4960 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4961 // now, redirty the cards of the survivor regions |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4962 // (it seemed faster to do it this way, instead of iterating over |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4963 // all regions and then clearing / dirtying as appropriate) |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4964 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4965 } |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4966 |
342 | 4967 double elapsed = os::elapsedTime() - start; |
4968 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
940
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4969 #ifndef PRODUCT |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4970 if (G1VerifyCTCleanup || VerifyAfterGC) { |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4971 G1VerifyCardTableCleanup cleanup_verifier(ct_bs); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4972 heap_region_iterate(&cleanup_verifier); |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4973 } |
8624da129f0b
6841313: G1: dirty cards of survivor regions in parallel
apetrusenko
parents:
936
diff
changeset
|
4974 #endif |
342 | 4975 } |
4976 | |
4977 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
4978 double young_time_ms = 0.0; | |
4979 double non_young_time_ms = 0.0; | |
4980 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4981 // Since the collection set is a superset of the the young list, |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4982 // all we need to do to clear the young list is clear its |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4983 // head and length, and unlink any young regions in the code below |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4984 _young_list->clear(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
4985 |
342 | 4986 G1CollectorPolicy* policy = g1_policy(); |
4987 | |
4988 double start_sec = os::elapsedTime(); | |
4989 bool non_young = true; | |
4990 | |
4991 HeapRegion* cur = cs_head; | |
4992 int age_bound = -1; | |
4993 size_t rs_lengths = 0; | |
4994 | |
4995 while (cur != NULL) { | |
4996 if (non_young) { | |
4997 if (cur->is_young()) { | |
4998 double end_sec = os::elapsedTime(); | |
4999 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5000 non_young_time_ms += elapsed_ms; | |
5001 | |
5002 start_sec = os::elapsedTime(); | |
5003 non_young = false; | |
5004 } | |
5005 } else { | |
5006 if (!cur->is_on_free_list()) { | |
5007 double end_sec = os::elapsedTime(); | |
5008 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5009 young_time_ms += elapsed_ms; | |
5010 | |
5011 start_sec = os::elapsedTime(); | |
5012 non_young = true; | |
5013 } | |
5014 } | |
5015 | |
5016 rs_lengths += cur->rem_set()->occupied(); | |
5017 | |
5018 HeapRegion* next = cur->next_in_collection_set(); | |
5019 assert(cur->in_collection_set(), "bad CS"); | |
5020 cur->set_next_in_collection_set(NULL); | |
5021 cur->set_in_collection_set(false); | |
5022 | |
5023 if (cur->is_young()) { | |
5024 int index = cur->young_index_in_cset(); | |
5025 guarantee( index != -1, "invariant" ); | |
5026 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
5027 size_t words_survived = _surviving_young_words[index]; | |
5028 cur->record_surv_words_in_group(words_survived); | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5029 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5030 // At this point the we have 'popped' cur from the collection set |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5031 // (linked via next_in_collection_set()) but it is still in the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5032 // young list (linked via next_young_region()). Clear the |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5033 // _next_young_region field. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5034 cur->set_next_young_region(NULL); |
342 | 5035 } else { |
5036 int index = cur->young_index_in_cset(); | |
5037 guarantee( index == -1, "invariant" ); | |
5038 } | |
5039 | |
5040 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
5041 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
5042 "invariant" ); | |
5043 | |
5044 if (!cur->evacuation_failed()) { | |
5045 // And the region is empty. | |
5046 assert(!cur->is_empty(), | |
5047 "Should not have empty regions in a CS."); | |
5048 free_region(cur); | |
5049 } else { | |
5050 cur->uninstall_surv_rate_group(); | |
5051 if (cur->is_young()) | |
5052 cur->set_young_index_in_cset(-1); | |
5053 cur->set_not_young(); | |
5054 cur->set_evacuation_failed(false); | |
5055 } | |
5056 cur = next; | |
5057 } | |
5058 | |
5059 policy->record_max_rs_lengths(rs_lengths); | |
5060 policy->cset_regions_freed(); | |
5061 | |
5062 double end_sec = os::elapsedTime(); | |
5063 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
5064 if (non_young) | |
5065 non_young_time_ms += elapsed_ms; | |
5066 else | |
5067 young_time_ms += elapsed_ms; | |
5068 | |
5069 policy->record_young_free_cset_time_ms(young_time_ms); | |
5070 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
5071 } | |
5072 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5073 // This routine is similar to the above but does not record |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5074 // any policy statistics or update free lists; we are abandoning |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5075 // the current incremental collection set in preparation of a |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5076 // full collection. After the full GC we will start to build up |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5077 // the incremental collection set again. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5078 // This is only called when we're doing a full collection |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5079 // and is immediately followed by the tearing down of the young list. |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5080 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5081 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5082 HeapRegion* cur = cs_head; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5083 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5084 while (cur != NULL) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5085 HeapRegion* next = cur->next_in_collection_set(); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5086 assert(cur->in_collection_set(), "bad CS"); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5087 cur->set_next_in_collection_set(NULL); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5088 cur->set_in_collection_set(false); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5089 cur->set_young_index_in_cset(-1); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5090 cur = next; |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5091 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5092 } |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5093 |
342 | 5094 HeapRegion* |
5095 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { | |
5096 assert(ZF_mon->owned_by_self(), "Precondition"); | |
5097 HeapRegion* res = pop_unclean_region_list_locked(); | |
5098 if (res != NULL) { | |
5099 assert(!res->continuesHumongous() && | |
5100 res->zero_fill_state() != HeapRegion::Allocated, | |
5101 "Only free regions on unclean list."); | |
5102 if (zero_filled) { | |
5103 res->ensure_zero_filled_locked(); | |
5104 res->set_zero_fill_allocated(); | |
5105 } | |
5106 } | |
5107 return res; | |
5108 } | |
5109 | |
5110 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { | |
5111 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5112 return alloc_region_from_unclean_list_locked(zero_filled); | |
5113 } | |
5114 | |
5115 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { | |
5116 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5117 put_region_on_unclean_list_locked(r); | |
5118 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
5119 } | |
5120 | |
5121 void G1CollectedHeap::set_unclean_regions_coming(bool b) { | |
5122 MutexLockerEx x(Cleanup_mon); | |
5123 set_unclean_regions_coming_locked(b); | |
5124 } | |
5125 | |
5126 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { | |
5127 assert(Cleanup_mon->owned_by_self(), "Precondition"); | |
5128 _unclean_regions_coming = b; | |
5129 // Wake up mutator threads that might be waiting for completeCleanup to | |
5130 // finish. | |
5131 if (!b) Cleanup_mon->notify_all(); | |
5132 } | |
5133 | |
5134 void G1CollectedHeap::wait_for_cleanup_complete() { | |
1973 | 5135 assert_not_at_safepoint(); |
342 | 5136 MutexLockerEx x(Cleanup_mon); |
5137 wait_for_cleanup_complete_locked(); | |
5138 } | |
5139 | |
5140 void G1CollectedHeap::wait_for_cleanup_complete_locked() { | |
5141 assert(Cleanup_mon->owned_by_self(), "precondition"); | |
5142 while (_unclean_regions_coming) { | |
5143 Cleanup_mon->wait(); | |
5144 } | |
5145 } | |
5146 | |
5147 void | |
5148 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { | |
5149 assert(ZF_mon->owned_by_self(), "precondition."); | |
1545
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5150 #ifdef ASSERT |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5151 if (r->is_gc_alloc_region()) { |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5152 ResourceMark rm; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5153 stringStream region_str; |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5154 print_on(®ion_str); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5155 assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s", |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5156 region_str.as_string())); |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5157 } |
cc387008223e
6921317: (partial) G1: assert(top() == bottom() || zfs == Allocated,"Region must be empty, or we must be setting it to
apetrusenko
parents:
1489
diff
changeset
|
5158 #endif |
342 | 5159 _unclean_region_list.insert_before_head(r); |
5160 } | |
5161 | |
5162 void | |
5163 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { | |
5164 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5165 prepend_region_list_on_unclean_list_locked(list); | |
5166 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
5167 } | |
5168 | |
5169 void | |
5170 G1CollectedHeap:: | |
5171 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { | |
5172 assert(ZF_mon->owned_by_self(), "precondition."); | |
5173 _unclean_region_list.prepend_list(list); | |
5174 } | |
5175 | |
5176 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { | |
5177 assert(ZF_mon->owned_by_self(), "precondition."); | |
5178 HeapRegion* res = _unclean_region_list.pop(); | |
5179 if (res != NULL) { | |
5180 // Inform ZF thread that there's a new unclean head. | |
5181 if (_unclean_region_list.hd() != NULL && should_zf()) | |
5182 ZF_mon->notify_all(); | |
5183 } | |
5184 return res; | |
5185 } | |
5186 | |
5187 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { | |
5188 assert(ZF_mon->owned_by_self(), "precondition."); | |
5189 return _unclean_region_list.hd(); | |
5190 } | |
5191 | |
5192 | |
5193 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { | |
5194 assert(ZF_mon->owned_by_self(), "Precondition"); | |
5195 HeapRegion* r = peek_unclean_region_list_locked(); | |
5196 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { | |
5197 // Result of below must be equal to "r", since we hold the lock. | |
5198 (void)pop_unclean_region_list_locked(); | |
5199 put_free_region_on_list_locked(r); | |
5200 return true; | |
5201 } else { | |
5202 return false; | |
5203 } | |
5204 } | |
5205 | |
5206 bool G1CollectedHeap::move_cleaned_region_to_free_list() { | |
5207 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5208 return move_cleaned_region_to_free_list_locked(); | |
5209 } | |
5210 | |
5211 | |
5212 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { | |
5213 assert(ZF_mon->owned_by_self(), "precondition."); | |
5214 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5215 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, | |
5216 "Regions on free list must be zero filled"); | |
5217 assert(!r->isHumongous(), "Must not be humongous."); | |
5218 assert(r->is_empty(), "Better be empty"); | |
5219 assert(!r->is_on_free_list(), | |
5220 "Better not already be on free list"); | |
5221 assert(!r->is_on_unclean_list(), | |
5222 "Better not already be on unclean list"); | |
5223 r->set_on_free_list(true); | |
5224 r->set_next_on_free_list(_free_region_list); | |
5225 _free_region_list = r; | |
5226 _free_region_list_size++; | |
5227 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5228 } | |
5229 | |
5230 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { | |
5231 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5232 put_free_region_on_list_locked(r); | |
5233 } | |
5234 | |
5235 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { | |
5236 assert(ZF_mon->owned_by_self(), "precondition."); | |
5237 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5238 HeapRegion* res = _free_region_list; | |
5239 if (res != NULL) { | |
5240 _free_region_list = res->next_from_free_list(); | |
5241 _free_region_list_size--; | |
5242 res->set_on_free_list(false); | |
5243 res->set_next_on_free_list(NULL); | |
5244 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5245 } | |
5246 return res; | |
5247 } | |
5248 | |
5249 | |
5250 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { | |
5251 // By self, or on behalf of self. | |
5252 assert(Heap_lock->is_locked(), "Precondition"); | |
5253 HeapRegion* res = NULL; | |
5254 bool first = true; | |
5255 while (res == NULL) { | |
5256 if (zero_filled || !first) { | |
5257 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5258 res = pop_free_region_list_locked(); | |
5259 if (res != NULL) { | |
5260 assert(!res->zero_fill_is_allocated(), | |
5261 "No allocated regions on free list."); | |
5262 res->set_zero_fill_allocated(); | |
5263 } else if (!first) { | |
5264 break; // We tried both, time to return NULL. | |
5265 } | |
5266 } | |
5267 | |
5268 if (res == NULL) { | |
5269 res = alloc_region_from_unclean_list(zero_filled); | |
5270 } | |
5271 assert(res == NULL || | |
5272 !zero_filled || | |
5273 res->zero_fill_is_allocated(), | |
5274 "We must have allocated the region we're returning"); | |
5275 first = false; | |
5276 } | |
5277 return res; | |
5278 } | |
5279 | |
5280 void G1CollectedHeap::remove_allocated_regions_from_lists() { | |
5281 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5282 { | |
5283 HeapRegion* prev = NULL; | |
5284 HeapRegion* cur = _unclean_region_list.hd(); | |
5285 while (cur != NULL) { | |
5286 HeapRegion* next = cur->next_from_unclean_list(); | |
5287 if (cur->zero_fill_is_allocated()) { | |
5288 // Remove from the list. | |
5289 if (prev == NULL) { | |
5290 (void)_unclean_region_list.pop(); | |
5291 } else { | |
5292 _unclean_region_list.delete_after(prev); | |
5293 } | |
5294 cur->set_on_unclean_list(false); | |
5295 cur->set_next_on_unclean_list(NULL); | |
5296 } else { | |
5297 prev = cur; | |
5298 } | |
5299 cur = next; | |
5300 } | |
5301 assert(_unclean_region_list.sz() == unclean_region_list_length(), | |
5302 "Inv"); | |
5303 } | |
5304 | |
5305 { | |
5306 HeapRegion* prev = NULL; | |
5307 HeapRegion* cur = _free_region_list; | |
5308 while (cur != NULL) { | |
5309 HeapRegion* next = cur->next_from_free_list(); | |
5310 if (cur->zero_fill_is_allocated()) { | |
5311 // Remove from the list. | |
5312 if (prev == NULL) { | |
5313 _free_region_list = cur->next_from_free_list(); | |
5314 } else { | |
5315 prev->set_next_on_free_list(cur->next_from_free_list()); | |
5316 } | |
5317 cur->set_on_free_list(false); | |
5318 cur->set_next_on_free_list(NULL); | |
5319 _free_region_list_size--; | |
5320 } else { | |
5321 prev = cur; | |
5322 } | |
5323 cur = next; | |
5324 } | |
5325 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5326 } | |
5327 } | |
5328 | |
5329 bool G1CollectedHeap::verify_region_lists() { | |
5330 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5331 return verify_region_lists_locked(); | |
5332 } | |
5333 | |
5334 bool G1CollectedHeap::verify_region_lists_locked() { | |
5335 HeapRegion* unclean = _unclean_region_list.hd(); | |
5336 while (unclean != NULL) { | |
5337 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); | |
5338 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); | |
5339 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, | |
5340 "Everything else is possible."); | |
5341 unclean = unclean->next_from_unclean_list(); | |
5342 } | |
5343 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); | |
5344 | |
5345 HeapRegion* free_r = _free_region_list; | |
5346 while (free_r != NULL) { | |
5347 assert(free_r->is_on_free_list(), "Well, it is!"); | |
5348 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); | |
5349 switch (free_r->zero_fill_state()) { | |
5350 case HeapRegion::NotZeroFilled: | |
5351 case HeapRegion::ZeroFilling: | |
5352 guarantee(false, "Should not be on free list."); | |
5353 break; | |
5354 default: | |
5355 // Everything else is possible. | |
5356 break; | |
5357 } | |
5358 free_r = free_r->next_from_free_list(); | |
5359 } | |
5360 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); | |
5361 // If we didn't do an assertion... | |
5362 return true; | |
5363 } | |
5364 | |
5365 size_t G1CollectedHeap::free_region_list_length() { | |
5366 assert(ZF_mon->owned_by_self(), "precondition."); | |
5367 size_t len = 0; | |
5368 HeapRegion* cur = _free_region_list; | |
5369 while (cur != NULL) { | |
5370 len++; | |
5371 cur = cur->next_from_free_list(); | |
5372 } | |
5373 return len; | |
5374 } | |
5375 | |
5376 size_t G1CollectedHeap::unclean_region_list_length() { | |
5377 assert(ZF_mon->owned_by_self(), "precondition."); | |
5378 return _unclean_region_list.length(); | |
5379 } | |
5380 | |
5381 size_t G1CollectedHeap::n_regions() { | |
5382 return _hrs->length(); | |
5383 } | |
5384 | |
5385 size_t G1CollectedHeap::max_regions() { | |
5386 return | |
5387 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / | |
5388 HeapRegion::GrainBytes; | |
5389 } | |
5390 | |
5391 size_t G1CollectedHeap::free_regions() { | |
5392 /* Possibly-expensive assert. | |
5393 assert(_free_regions == count_free_regions(), | |
5394 "_free_regions is off."); | |
5395 */ | |
5396 return _free_regions; | |
5397 } | |
5398 | |
5399 bool G1CollectedHeap::should_zf() { | |
5400 return _free_region_list_size < (size_t) G1ConcZFMaxRegions; | |
5401 } | |
5402 | |
5403 class RegionCounter: public HeapRegionClosure { | |
5404 size_t _n; | |
5405 public: | |
5406 RegionCounter() : _n(0) {} | |
5407 bool doHeapRegion(HeapRegion* r) { | |
677 | 5408 if (r->is_empty()) { |
342 | 5409 assert(!r->isHumongous(), "H regions should not be empty."); |
5410 _n++; | |
5411 } | |
5412 return false; | |
5413 } | |
5414 int res() { return (int) _n; } | |
5415 }; | |
5416 | |
5417 size_t G1CollectedHeap::count_free_regions() { | |
5418 RegionCounter rc; | |
5419 heap_region_iterate(&rc); | |
5420 size_t n = rc.res(); | |
5421 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) | |
5422 n--; | |
5423 return n; | |
5424 } | |
5425 | |
5426 size_t G1CollectedHeap::count_free_regions_list() { | |
5427 size_t n = 0; | |
5428 size_t o = 0; | |
5429 ZF_mon->lock_without_safepoint_check(); | |
5430 HeapRegion* cur = _free_region_list; | |
5431 while (cur != NULL) { | |
5432 cur = cur->next_from_free_list(); | |
5433 n++; | |
5434 } | |
5435 size_t m = unclean_region_list_length(); | |
5436 ZF_mon->unlock(); | |
5437 return n + m; | |
5438 } | |
5439 | |
5440 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
5441 assert(heap_lock_held_for_gc(), | |
5442 "the heap lock should already be held by or for this thread"); | |
5443 _young_list->push_region(hr); | |
5444 g1_policy()->set_region_short_lived(hr); | |
5445 } | |
5446 | |
5447 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5448 private: | |
5449 bool _success; | |
5450 public: | |
5451 NoYoungRegionsClosure() : _success(true) { } | |
5452 bool doHeapRegion(HeapRegion* r) { | |
5453 if (r->is_young()) { | |
5454 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5455 r->bottom(), r->end()); | |
5456 _success = false; | |
5457 } | |
5458 return false; | |
5459 } | |
5460 bool success() { return _success; } | |
5461 }; | |
5462 | |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5463 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5464 bool ret = _young_list->check_list_empty(check_sample); |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5465 |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1391
diff
changeset
|
5466 if (check_heap) { |
342 | 5467 NoYoungRegionsClosure closure; |
5468 heap_region_iterate(&closure); | |
5469 ret = ret && closure.success(); | |
5470 } | |
5471 | |
5472 return ret; | |
5473 } | |
5474 | |
5475 void G1CollectedHeap::empty_young_list() { | |
5476 assert(heap_lock_held_for_gc(), | |
5477 "the heap lock should already be held by or for this thread"); | |
5478 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
5479 | |
5480 _young_list->empty_list(); | |
5481 } | |
5482 | |
5483 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5484 bool no_allocs = true; | |
5485 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5486 HeapRegion* r = _gc_alloc_regions[ap]; | |
5487 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5488 } | |
5489 return no_allocs; | |
5490 } | |
5491 | |
545 | 5492 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5493 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5494 HeapRegion* r = _gc_alloc_regions[ap]; | |
5495 if (r != NULL) { | |
5496 // Check for aliases. | |
5497 bool has_processed_alias = false; | |
5498 for (int i = 0; i < ap; ++i) { | |
5499 if (_gc_alloc_regions[i] == r) { | |
5500 has_processed_alias = true; | |
5501 break; | |
5502 } | |
5503 } | |
5504 if (!has_processed_alias) { | |
545 | 5505 retire_alloc_region(r, false /* par */); |
342 | 5506 } |
5507 } | |
5508 } | |
5509 } | |
5510 | |
5511 | |
5512 // Done at the start of full GC. | |
5513 void G1CollectedHeap::tear_down_region_lists() { | |
5514 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5515 while (pop_unclean_region_list_locked() != NULL) ; | |
5516 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, | |
1489
cff162798819
6888953: some calls to function-like macros are missing semicolons
jcoomes
parents:
1394
diff
changeset
|
5517 "Postconditions of loop."); |
342 | 5518 while (pop_free_region_list_locked() != NULL) ; |
5519 assert(_free_region_list == NULL, "Postcondition of loop."); | |
5520 if (_free_region_list_size != 0) { | |
5521 gclog_or_tty->print_cr("Size is %d.", _free_region_list_size); | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
5522 print_on(gclog_or_tty, true /* extended */); |
342 | 5523 } |
5524 assert(_free_region_list_size == 0, "Postconditions of loop."); | |
5525 } | |
5526 | |
5527 | |
5528 class RegionResetter: public HeapRegionClosure { | |
5529 G1CollectedHeap* _g1; | |
5530 int _n; | |
5531 public: | |
5532 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5533 bool doHeapRegion(HeapRegion* r) { | |
5534 if (r->continuesHumongous()) return false; | |
5535 if (r->top() > r->bottom()) { | |
5536 if (r->top() < r->end()) { | |
5537 Copy::fill_to_words(r->top(), | |
5538 pointer_delta(r->end(), r->top())); | |
5539 } | |
5540 r->set_zero_fill_allocated(); | |
5541 } else { | |
5542 assert(r->is_empty(), "tautology"); | |
677 | 5543 _n++; |
5544 switch (r->zero_fill_state()) { | |
342 | 5545 case HeapRegion::NotZeroFilled: |
5546 case HeapRegion::ZeroFilling: | |
5547 _g1->put_region_on_unclean_list_locked(r); | |
5548 break; | |
5549 case HeapRegion::Allocated: | |
5550 r->set_zero_fill_complete(); | |
5551 // no break; go on to put on free list. | |
5552 case HeapRegion::ZeroFilled: | |
5553 _g1->put_free_region_on_list_locked(r); | |
5554 break; | |
5555 } | |
5556 } | |
5557 return false; | |
5558 } | |
5559 | |
5560 int getFreeRegionCount() {return _n;} | |
5561 }; | |
5562 | |
5563 // Done at the end of full GC. | |
5564 void G1CollectedHeap::rebuild_region_lists() { | |
5565 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5566 // This needs to go at the end of the full GC. | |
5567 RegionResetter rs; | |
5568 heap_region_iterate(&rs); | |
5569 _free_regions = rs.getFreeRegionCount(); | |
5570 // Tell the ZF thread it may have work to do. | |
5571 if (should_zf()) ZF_mon->notify_all(); | |
5572 } | |
5573 | |
5574 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { | |
5575 G1CollectedHeap* _g1; | |
5576 int _n; | |
5577 public: | |
5578 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5579 bool doHeapRegion(HeapRegion* r) { | |
5580 if (r->continuesHumongous()) return false; | |
5581 if (r->top() > r->bottom()) { | |
5582 // There are assertions in "set_zero_fill_needed()" below that | |
5583 // require top() == bottom(), so this is technically illegal. | |
5584 // We'll skirt the law here, by making that true temporarily. | |
5585 DEBUG_ONLY(HeapWord* save_top = r->top(); | |
5586 r->set_top(r->bottom())); | |
5587 r->set_zero_fill_needed(); | |
5588 DEBUG_ONLY(r->set_top(save_top)); | |
5589 } | |
5590 return false; | |
5591 } | |
5592 }; | |
5593 | |
5594 // Done at the start of full GC. | |
5595 void G1CollectedHeap::set_used_regions_to_need_zero_fill() { | |
5596 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5597 // This needs to go at the end of the full GC. | |
5598 UsedRegionsNeedZeroFillSetter rs; | |
5599 heap_region_iterate(&rs); | |
5600 } | |
5601 | |
5602 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
5603 _refine_cte_cl->set_concurrent(concurrent); | |
5604 } | |
5605 | |
5606 #ifndef PRODUCT | |
5607 | |
5608 class PrintHeapRegionClosure: public HeapRegionClosure { | |
5609 public: | |
5610 bool doHeapRegion(HeapRegion *r) { | |
5611 gclog_or_tty->print("Region: "PTR_FORMAT":", r); | |
5612 if (r != NULL) { | |
5613 if (r->is_on_free_list()) | |
5614 gclog_or_tty->print("Free "); | |
5615 if (r->is_young()) | |
5616 gclog_or_tty->print("Young "); | |
5617 if (r->isHumongous()) | |
5618 gclog_or_tty->print("Is Humongous "); | |
5619 r->print(); | |
5620 } | |
5621 return false; | |
5622 } | |
5623 }; | |
5624 | |
5625 class SortHeapRegionClosure : public HeapRegionClosure { | |
5626 size_t young_regions,free_regions, unclean_regions; | |
5627 size_t hum_regions, count; | |
5628 size_t unaccounted, cur_unclean, cur_alloc; | |
5629 size_t total_free; | |
5630 HeapRegion* cur; | |
5631 public: | |
5632 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), | |
5633 free_regions(0), unclean_regions(0), | |
5634 hum_regions(0), | |
5635 count(0), unaccounted(0), | |
5636 cur_alloc(0), total_free(0) | |
5637 {} | |
5638 bool doHeapRegion(HeapRegion *r) { | |
5639 count++; | |
5640 if (r->is_on_free_list()) free_regions++; | |
5641 else if (r->is_on_unclean_list()) unclean_regions++; | |
5642 else if (r->isHumongous()) hum_regions++; | |
5643 else if (r->is_young()) young_regions++; | |
5644 else if (r == cur) cur_alloc++; | |
5645 else unaccounted++; | |
5646 return false; | |
5647 } | |
5648 void print() { | |
5649 total_free = free_regions + unclean_regions; | |
5650 gclog_or_tty->print("%d regions\n", count); | |
5651 gclog_or_tty->print("%d free: free_list = %d unclean = %d\n", | |
5652 total_free, free_regions, unclean_regions); | |
5653 gclog_or_tty->print("%d humongous %d young\n", | |
5654 hum_regions, young_regions); | |
5655 gclog_or_tty->print("%d cur_alloc\n", cur_alloc); | |
5656 gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted); | |
5657 } | |
5658 }; | |
5659 | |
5660 void G1CollectedHeap::print_region_counts() { | |
5661 SortHeapRegionClosure sc(_cur_alloc_region); | |
5662 PrintHeapRegionClosure cl; | |
5663 heap_region_iterate(&cl); | |
5664 heap_region_iterate(&sc); | |
5665 sc.print(); | |
5666 print_region_accounting_info(); | |
5667 }; | |
5668 | |
5669 bool G1CollectedHeap::regions_accounted_for() { | |
5670 // TODO: regions accounting for young/survivor/tenured | |
5671 return true; | |
5672 } | |
5673 | |
5674 bool G1CollectedHeap::print_region_accounting_info() { | |
5675 gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).", | |
5676 free_regions(), | |
5677 count_free_regions(), count_free_regions_list(), | |
5678 _free_region_list_size, _unclean_region_list.sz()); | |
5679 gclog_or_tty->print_cr("cur_alloc: %d.", | |
5680 (_cur_alloc_region == NULL ? 0 : 1)); | |
5681 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); | |
5682 | |
5683 // TODO: check regions accounting for young/survivor/tenured | |
5684 return true; | |
5685 } | |
5686 | |
5687 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5688 HeapRegion* hr = heap_region_containing(p); | |
5689 if (hr == NULL) { | |
5690 return is_in_permanent(p); | |
5691 } else { | |
5692 return hr->is_in(p); | |
5693 } | |
5694 } | |
941 | 5695 #endif // !PRODUCT |
342 | 5696 |
5697 void G1CollectedHeap::g1_unimplemented() { | |
5698 // Unimplemented(); | |
5699 } |