annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 807:d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
Summary: For heaps larger than 32Gb, the number of heap regions overflows the data type used to hold the region index in the SparsePRT structure. Changed the region indexes, card indexes, and RSet hash table buckets to ints and added some size overflow guarantees.
Reviewed-by: ysr, tonyp
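The arithmetic behind the failure is worth spelling out; the 1 MB region size of this era and the 16-bit signed index type are assumptions for illustration:

```cpp
// Minimal sketch of the overflow named in the summary above.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t heap_bytes   = 33ULL << 30;                 // a 33 GB heap
  const uint64_t region_bytes = 1ULL  << 20;                 // assumed 1 MB regions
  const uint64_t num_regions  = heap_bytes / region_bytes;   // 33792 regions
  const int16_t  last_index   = (int16_t)(num_regions - 1);  // wraps to -31745
  printf("regions = %llu, last index as a 16-bit short = %d\n",
         (unsigned long long)num_regions, last_index);
  return 0;  // a negative, wrapped index is what trips the "bad index" assert
}
```

Widening the indexes to int, as the summary describes, makes the index range comfortably larger than any realistic region count.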
author | johnc
date | Thu, 11 Jun 2009 17:19:33 -0700
parents | 29e7d79232b9
children | 830ca2573896

rev | line source
---|---
342 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_g1CollectedHeap.cpp.incl" | |
27 | |
28 // turn it on so that the contents of the young list (scan-only / | |
29 // to-be-collected) are printed at "strategic" points before / during | |
30 // / after the collection --- this is useful for debugging | |
31 #define SCAN_ONLY_VERBOSE 0 | |
32 // CURRENT STATUS | |
33 // This file is under construction. Search for "FIXME". | |
34 | |
35 // INVARIANTS/NOTES | |
36 // | |
37 // All allocation activity covered by the G1CollectedHeap interface is | |
38 // serialized by acquiring the HeapLock. This happens in | |
39 // mem_allocate_work, which all such allocation functions call. | |
40 // (Note that this does not apply to TLAB allocation, which is not part | |
41 // of this interface: it is done by clients of this interface.) | |
42 | |
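A minimal sketch of that locking protocol, using HotSpot's scoped-lock idiom; the function body is illustrative only, not the real mem_allocate_work:

```cpp
// Illustrative sketch: every non-TLAB allocation path funnels through one
// Heap_lock-guarded section, so allocation-region state is mutated serially.
HeapWord* mem_allocate_work_sketch(size_t word_size) {
  MutexLocker ml(Heap_lock);   // acquired by all heap-interface allocators
  // ... try the current alloc region, expanding or pausing as needed ...
  return NULL;                 // placeholder result
}
```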
43 // Local to this file. | |
44 | |
45 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
46 SuspendibleThreadSet* _sts; | |
47 G1RemSet* _g1rs; | |
48 ConcurrentG1Refine* _cg1r; | |
49 bool _concurrent; | |
50 public: | |
51 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
52 G1RemSet* g1rs, | |
53 ConcurrentG1Refine* cg1r) : | |
54 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
55 {} | |
56 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
57 _g1rs->concurrentRefineOneCard(card_ptr, worker_i); | |
58 if (_concurrent && _sts->should_yield()) { | |
59 // Caller will actually yield. | |
60 return false; | |
61 } | |
62 // Otherwise, we finished successfully; return true. | |
63 return true; | |
64 } | |
65 void set_concurrent(bool b) { _concurrent = b; } | |
66 }; | |
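A hedged sketch of how a closure like this gets wired in; it mirrors the set_closure() call made at the end of check_ct_logs_at_safepoint() further down, with the collaborator objects assumed to be in scope:

```cpp
// Assumed collaborators: sts, g1rs and cg1r are created during heap setup.
RefineCardTableEntryClosure* refine_cl =
    new RefineCardTableEntryClosure(&sts, g1rs, cg1r);
JavaThread::dirty_card_queue_set().set_closure(refine_cl);
// The refinement machinery now calls refine_cl->do_card_ptr(card, worker_i)
// for each logged dirty card; a false return asks the caller to yield.
```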
67 | |
68 | |
69 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
70 int _calls; | |
71 G1CollectedHeap* _g1h; | |
72 CardTableModRefBS* _ctbs; | |
73 int _histo[256]; | |
74 public: | |
75 ClearLoggedCardTableEntryClosure() : | |
76 _calls(0) | |
77 { | |
78 _g1h = G1CollectedHeap::heap(); | |
79 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
80 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
81 } | |
82 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
83 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
84 _calls++; | |
85 unsigned char* ujb = (unsigned char*)card_ptr; | |
86 int ind = (int)(*ujb); | |
87 _histo[ind]++; | |
88 *card_ptr = -1; | |
89 } | |
90 return true; | |
91 } | |
92 int calls() { return _calls; } | |
93 void print_histo() { | |
94 gclog_or_tty->print_cr("Card table value histogram:"); | |
95 for (int i = 0; i < 256; i++) { | |
96 if (_histo[i] != 0) { | |
97 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
98 } | |
99 } | |
100 } | |
101 }; | |
102 | |
103 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
104 int _calls; | |
105 G1CollectedHeap* _g1h; | |
106 CardTableModRefBS* _ctbs; | |
107 public: | |
108 RedirtyLoggedCardTableEntryClosure() : | |
109 _calls(0) | |
110 { | |
111 _g1h = G1CollectedHeap::heap(); | |
112 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
113 } | |
114 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
115 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
116 _calls++; | |
117 *card_ptr = 0; | |
118 } | |
119 return true; | |
120 } | |
121 int calls() { return _calls; } | |
122 }; | |
123 | |
616 | 124 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
125 public: |
126 bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
127 *card_ptr = CardTableModRefBS::dirty_card_val(); |
128 return true; |
129 } |
130 }; |
131 |
342 | 132 YoungList::YoungList(G1CollectedHeap* g1h) |
133 : _g1h(g1h), _head(NULL), | |
134 _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL), | |
135 _length(0), _scan_only_length(0), | |
136 _last_sampled_rs_lengths(0), | |
545 | 137 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
342 | 138 { |
139 guarantee( check_list_empty(false), "just making sure..." ); | |
140 } | |
141 | |
142 void YoungList::push_region(HeapRegion *hr) { | |
143 assert(!hr->is_young(), "should not already be young"); | |
144 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
145 | |
146 hr->set_next_young_region(_head); | |
147 _head = hr; | |
148 | |
149 hr->set_young(); | |
150 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
151 ++_length; | |
152 } | |
153 | |
154 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 155 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 156 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
157 | |
158 hr->set_next_young_region(_survivor_head); | |
159 if (_survivor_head == NULL) { | |
545 | 160 _survivor_tail = hr; |
342 | 161 } |
162 _survivor_head = hr; | |
163 | |
164 ++_survivor_length; | |
165 } | |
166 | |
167 HeapRegion* YoungList::pop_region() { | |
168 while (_head != NULL) { | |
169 assert( length() > 0, "list should not be empty" ); | |
170 HeapRegion* ret = _head; | |
171 _head = ret->get_next_young_region(); | |
172 ret->set_next_young_region(NULL); | |
173 --_length; | |
174 assert(ret->is_young(), "region should be very young"); | |
175 | |
176 // Replace 'Survivor' region type with 'Young'. So the region will | |
177 // be treated as a young region and will not be 'confused' with | |
178 // newly created survivor regions. | |
179 if (ret->is_survivor()) { | |
180 ret->set_young(); | |
181 } | |
182 | |
183 if (!ret->is_scan_only()) { | |
184 return ret; | |
185 } | |
186 | |
187 // scan-only, we'll add it to the scan-only list | |
188 if (_scan_only_tail == NULL) { | |
189 guarantee( _scan_only_head == NULL, "invariant" ); | |
190 | |
191 _scan_only_head = ret; | |
192 _curr_scan_only = ret; | |
193 } else { | |
194 guarantee( _scan_only_head != NULL, "invariant" ); | |
195 _scan_only_tail->set_next_young_region(ret); | |
196 } | |
197 guarantee( ret->get_next_young_region() == NULL, "invariant" ); | |
198 _scan_only_tail = ret; | |
199 | |
200 // no need to be tagged as scan-only any more | |
201 ret->set_young(); | |
202 | |
203 ++_scan_only_length; | |
204 } | |
205 assert( length() == 0, "list should be empty" ); | |
206 return NULL; | |
207 } | |
208 | |
209 void YoungList::empty_list(HeapRegion* list) { | |
210 while (list != NULL) { | |
211 HeapRegion* next = list->get_next_young_region(); | |
212 list->set_next_young_region(NULL); | |
213 list->uninstall_surv_rate_group(); | |
214 list->set_not_young(); | |
215 list = next; | |
216 } | |
217 } | |
218 | |
219 void YoungList::empty_list() { | |
220 assert(check_list_well_formed(), "young list should be well formed"); | |
221 | |
222 empty_list(_head); | |
223 _head = NULL; | |
224 _length = 0; | |
225 | |
226 empty_list(_scan_only_head); | |
227 _scan_only_head = NULL; | |
228 _scan_only_tail = NULL; | |
229 _scan_only_length = 0; | |
230 _curr_scan_only = NULL; | |
231 | |
232 empty_list(_survivor_head); | |
233 _survivor_head = NULL; | |
545 | 234 _survivor_tail = NULL; |
342 | 235 _survivor_length = 0; |
236 | |
237 _last_sampled_rs_lengths = 0; | |
238 | |
239 assert(check_list_empty(false), "just making sure..."); | |
240 } | |
241 | |
242 bool YoungList::check_list_well_formed() { | |
243 bool ret = true; | |
244 | |
245 size_t length = 0; | |
246 HeapRegion* curr = _head; | |
247 HeapRegion* last = NULL; | |
248 while (curr != NULL) { | |
249 if (!curr->is_young() || curr->is_scan_only()) { | |
250 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " | |
251 "incorrectly tagged (%d, %d)", | |
252 curr->bottom(), curr->end(), | |
253 curr->is_young(), curr->is_scan_only()); | |
254 ret = false; | |
255 } | |
256 ++length; | |
257 last = curr; | |
258 curr = curr->get_next_young_region(); | |
259 } | |
260 ret = ret && (length == _length); | |
261 | |
262 if (!ret) { | |
263 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
264 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
265 length, _length); | |
266 } | |
267 | |
268 bool scan_only_ret = true; | |
269 length = 0; | |
270 curr = _scan_only_head; | |
271 last = NULL; | |
272 while (curr != NULL) { | |
273 if (!curr->is_young() || curr->is_scan_only()) { | |
274 gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" " | |
275 "incorrectly tagged (%d, %d)", | |
276 curr->bottom(), curr->end(), | |
277 curr->is_young(), curr->is_scan_only()); | |
278 scan_only_ret = false; | |
279 } | |
280 ++length; | |
281 last = curr; | |
282 curr = curr->get_next_young_region(); | |
283 } | |
284 scan_only_ret = scan_only_ret && (length == _scan_only_length); | |
285 | |
286 if ( (last != _scan_only_tail) || | |
287 (_scan_only_head == NULL && _scan_only_tail != NULL) || | |
288 (_scan_only_head != NULL && _scan_only_tail == NULL) ) { | |
289 gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly"); | |
290 scan_only_ret = false; | |
291 } | |
292 | |
293 if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) { | |
294 gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly"); | |
295 scan_only_ret = false; | |
296 } | |
297 | |
298 if (!scan_only_ret) { | |
299 gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!"); | |
300 gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d", | |
301 length, _scan_only_length); | |
302 } | |
303 | |
304 return ret && scan_only_ret; | |
305 } | |
306 | |
307 bool YoungList::check_list_empty(bool ignore_scan_only_list, | |
308 bool check_sample) { | |
309 bool ret = true; | |
310 | |
311 if (_length != 0) { | |
312 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
313 _length); | |
314 ret = false; | |
315 } | |
316 if (check_sample && _last_sampled_rs_lengths != 0) { | |
317 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
318 ret = false; | |
319 } | |
320 if (_head != NULL) { | |
321 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
322 ret = false; | |
323 } | |
324 if (!ret) { | |
325 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
326 } | |
327 | |
328 if (ignore_scan_only_list) | |
329 return ret; | |
330 | |
331 bool scan_only_ret = true; | |
332 if (_scan_only_length != 0) { | |
333 gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d", | |
334 _scan_only_length); | |
335 scan_only_ret = false; | |
336 } | |
337 if (_scan_only_head != NULL) { | |
338 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head"); | |
339 scan_only_ret = false; | |
340 } | |
341 if (_scan_only_tail != NULL) { | |
342 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail"); | |
343 scan_only_ret = false; | |
344 } | |
345 if (!scan_only_ret) { | |
346 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty"); | |
347 } | |
348 | |
349 return ret && scan_only_ret; | |
350 } | |
351 | |
352 void | |
353 YoungList::rs_length_sampling_init() { | |
354 _sampled_rs_lengths = 0; | |
355 _curr = _head; | |
356 } | |
357 | |
358 bool | |
359 YoungList::rs_length_sampling_more() { | |
360 return _curr != NULL; | |
361 } | |
362 | |
363 void | |
364 YoungList::rs_length_sampling_next() { | |
365 assert( _curr != NULL, "invariant" ); | |
366 _sampled_rs_lengths += _curr->rem_set()->occupied(); | |
367 _curr = _curr->get_next_young_region(); | |
368 if (_curr == NULL) { | |
369 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
370 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
371 } | |
372 } | |
373 | |
374 void | |
375 YoungList::reset_auxilary_lists() { | |
376 // We could have just "moved" the scan-only list to the young list. | |
377 // However, the scan-only list is ordered according to the region | |
378 // age in descending order, so, by moving one entry at a time, we | |
379 // ensure that it is recreated in ascending order. | |
380 | |
381 guarantee( is_empty(), "young list should be empty" ); | |
382 assert(check_list_well_formed(), "young list should be well formed"); | |
383 | |
384 // Add survivor regions to SurvRateGroup. | |
385 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
545 | 386 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
342 | 387 for (HeapRegion* curr = _survivor_head; |
388 curr != NULL; | |
389 curr = curr->get_next_young_region()) { | |
390 _g1h->g1_policy()->set_region_survivors(curr); | |
391 } | |
392 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
393 | |
394 if (_survivor_head != NULL) { | |
395 _head = _survivor_head; | |
396 _length = _survivor_length + _scan_only_length; | |
545 | 397 _survivor_tail->set_next_young_region(_scan_only_head); |
342 | 398 } else { |
399 _head = _scan_only_head; | |
400 _length = _scan_only_length; | |
401 } | |
402 | |
403 for (HeapRegion* curr = _scan_only_head; | |
404 curr != NULL; | |
405 curr = curr->get_next_young_region()) { | |
406 curr->recalculate_age_in_surv_rate_group(); | |
407 } | |
408 _scan_only_head = NULL; | |
409 _scan_only_tail = NULL; | |
410 _scan_only_length = 0; | |
411 _curr_scan_only = NULL; | |
412 | |
413 _survivor_head = NULL; | |
545 | 414 _survivor_tail = NULL; |
342 | 415 _survivor_length = 0; |
545 | 416 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
342 | 417 |
418 assert(check_list_well_formed(), "young list should be well formed"); | |
419 } | |
420 | |
421 void YoungList::print() { | |
422 HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head}; | |
423 const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"}; | |
424 | |
425 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
426 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
427 HeapRegion *curr = lists[list]; | |
428 if (curr == NULL) | |
429 gclog_or_tty->print_cr(" empty"); | |
430 while (curr != NULL) { | |
431 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
432 "age: %4d, y: %d, s-o: %d, surv: %d", | |
433 curr->bottom(), curr->end(), | |
434 curr->top(), | |
435 curr->prev_top_at_mark_start(), | |
436 curr->next_top_at_mark_start(), | |
437 curr->top_at_conc_mark_count(), | |
438 curr->age_in_surv_rate_group_cond(), | |
439 curr->is_young(), | |
440 curr->is_scan_only(), | |
441 curr->is_survivor()); | |
442 curr = curr->get_next_young_region(); | |
443 } | |
444 } | |
445 | |
446 gclog_or_tty->print_cr(""); | |
447 } | |
448 | |
796 | 449 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
450 { |
451 // Claim the right to put the region on the dirty cards region list |
452 // by installing a self pointer. |
453 HeapRegion* next = hr->get_next_dirty_cards_region(); |
454 if (next == NULL) { |
455 HeapRegion* res = (HeapRegion*) |
456 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), |
457 NULL); |
458 if (res == NULL) { |
459 HeapRegion* head; |
460 do { |
461 // Put the region to the dirty cards region list. |
462 head = _dirty_cards_region_list; |
463 next = (HeapRegion*) |
464 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); |
465 if (next == head) { |
466 assert(hr->get_next_dirty_cards_region() == hr, |
467 "hr->get_next_dirty_cards_region() != hr"); |
468 if (next == NULL) { |
469 // The last region in the list points to itself. |
470 hr->set_next_dirty_cards_region(hr); |
471 } else { |
472 hr->set_next_dirty_cards_region(next); |
473 } |
474 } |
475 } while (next != head); |
476 } |
477 } |
478 } |
479 |
480 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
481 { |
482 HeapRegion* head; |
483 HeapRegion* hr; |
484 do { |
485 head = _dirty_cards_region_list; |
486 if (head == NULL) { |
487 return NULL; |
488 } |
489 HeapRegion* new_head = head->get_next_dirty_cards_region(); |
490 if (head == new_head) { |
491 // The last region. |
492 new_head = NULL; |
493 } |
494 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
495 head); |
496 } while (hr != head); |
497 assert(hr != NULL, "invariant"); |
498 hr->set_next_dirty_cards_region(NULL); |
499 return hr; |
500 } |
501 |
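The push/pop pair above is a Treiber-stack variant: a region first claims list membership by CAS-installing a self-pointer into its own next field (the self-pointer also terminates the list, leaving NULL free to mean "not on any list"), and only the claim winner links the region onto the shared head. A minimal C++11 analogue of the push side, with the next pointer written before the head CAS (a more conventional ordering than the original's) and Node standing in for HeapRegion:

```cpp
#include <atomic>

struct Node { std::atomic<Node*> next{nullptr}; };
std::atomic<Node*> list_head{nullptr};

void push(Node* n) {
  Node* expected = nullptr;
  // Claim membership: only the thread whose CAS installs the self-pointer
  // may link n into the shared list; all later pushes of n are no-ops.
  if (!n->next.compare_exchange_strong(expected, n)) return;
  Node* head = list_head.load();
  do {
    n->next.store(head != nullptr ? head : n);  // self-pointer marks the tail
  } while (!list_head.compare_exchange_weak(head, n));
}
```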
342 | 502 void G1CollectedHeap::stop_conc_gc_threads() { |
794 | 503 _cg1r->stop(); |
342 | 504 _czft->stop(); |
505 _cmThread->stop(); | |
506 } | |
507 | |
508 | |
509 void G1CollectedHeap::check_ct_logs_at_safepoint() { | |
510 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
511 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
512 | |
513 // Count the dirty cards at the start. | |
514 CountNonCleanMemRegionClosure count1(this); | |
515 ct_bs->mod_card_iterate(&count1); | |
516 int orig_count = count1.n(); | |
517 | |
518 // First clear the logged cards. | |
519 ClearLoggedCardTableEntryClosure clear; | |
520 dcqs.set_closure(&clear); | |
521 dcqs.apply_closure_to_all_completed_buffers(); | |
522 dcqs.iterate_closure_all_threads(false); | |
523 clear.print_histo(); | |
524 | |
525 // Now ensure that there's no dirty cards. | |
526 CountNonCleanMemRegionClosure count2(this); | |
527 ct_bs->mod_card_iterate(&count2); | |
528 if (count2.n() != 0) { | |
529 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
530 count2.n(), orig_count); | |
531 } | |
532 guarantee(count2.n() == 0, "Card table should be clean."); | |
533 | |
534 RedirtyLoggedCardTableEntryClosure redirty; | |
535 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
536 dcqs.apply_closure_to_all_completed_buffers(); | |
537 dcqs.iterate_closure_all_threads(false); | |
538 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
539 clear.calls(), orig_count); | |
540 guarantee(redirty.calls() == clear.calls(), | |
541 "Or else mechanism is broken."); | |
542 | |
543 CountNonCleanMemRegionClosure count3(this); | |
544 ct_bs->mod_card_iterate(&count3); | |
545 if (count3.n() != orig_count) { | |
546 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
547 orig_count, count3.n()); | |
548 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
549 } | |
550 | |
551 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
552 } | |
553 | |
554 // Private class members. | |
555 | |
556 G1CollectedHeap* G1CollectedHeap::_g1h; | |
557 | |
558 // Private methods. | |
559 | |
560 // Finds a HeapRegion that can be used to allocate a given size of block. | |
561 | |
562 | |
563 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, | |
564 bool do_expand, | |
565 bool zero_filled) { | |
566 ConcurrentZFThread::note_region_alloc(); | |
567 HeapRegion* res = alloc_free_region_from_lists(zero_filled); | |
568 if (res == NULL && do_expand) { | |
569 expand(word_size * HeapWordSize); | |
570 res = alloc_free_region_from_lists(zero_filled); | |
571 assert(res == NULL || | |
572 (!res->isHumongous() && | |
573 (!zero_filled || | |
574 res->zero_fill_state() == HeapRegion::Allocated)), | |
575 "Alloc Regions must be zero filled (and non-H)"); | |
576 } | |
577 if (res != NULL && res->is_empty()) _free_regions--; | |
578 assert(res == NULL || | |
579 (!res->isHumongous() && | |
580 (!zero_filled || | |
581 res->zero_fill_state() == HeapRegion::Allocated)), | |
582 "Non-young alloc Regions must be zero filled (and non-H)"); | |
583 | |
751 | 584 if (G1PrintRegions) { |
342 | 585 if (res != NULL) { |
586 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
587 "top "PTR_FORMAT, | |
588 res->hrs_index(), res->bottom(), res->end(), res->top()); | |
589 } | |
590 } | |
591 | |
592 return res; | |
593 } | |
594 | |
595 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, | |
596 size_t word_size, | |
597 bool zero_filled) { | |
598 HeapRegion* alloc_region = NULL; | |
599 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
600 alloc_region = newAllocRegion_work(word_size, true, zero_filled); | |
601 if (purpose == GCAllocForSurvived && alloc_region != NULL) { | |
545 | 602 alloc_region->set_survivor(); |
342 | 603 } |
604 ++_gc_alloc_region_counts[purpose]; | |
605 } else { | |
606 g1_policy()->note_alloc_region_limit_reached(purpose); | |
607 } | |
608 return alloc_region; | |
609 } | |
610 | |
611 // If could fit into free regions w/o expansion, try. | |
612 // Otherwise, if can expand, do so. | |
613 // Otherwise, if using ex regions might help, try with ex given back. | |
614 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { | |
615 assert(regions_accounted_for(), "Region leakage!"); | |
616 | |
617 // We can't allocate H regions while cleanupComplete is running, since | |
618 // some of the regions we find to be empty might not yet be added to the | |
619 // unclean list. (If we're already at a safepoint, this call is | |
620 // unnecessary, not to mention wrong.) | |
621 if (!SafepointSynchronize::is_at_safepoint()) | |
622 wait_for_cleanup_complete(); | |
623 | |
624 size_t num_regions = | |
625 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; | |
626 | |
627 // Special case if < one region??? | |
628 | |
629 // Remember the ft size. | |
630 size_t x_size = expansion_regions(); | |
631 | |
632 HeapWord* res = NULL; | |
633 bool eliminated_allocated_from_lists = false; | |
634 | |
635 // Can the allocation potentially fit in the free regions? | |
636 if (free_regions() >= num_regions) { | |
637 res = _hrs->obj_allocate(word_size); | |
638 } | |
639 if (res == NULL) { | |
640 // Try expansion. | |
641 size_t fs = _hrs->free_suffix(); | |
642 if (fs + x_size >= num_regions) { | |
643 expand((num_regions - fs) * HeapRegion::GrainBytes); | |
644 res = _hrs->obj_allocate(word_size); | |
645 assert(res != NULL, "This should have worked."); | |
646 } else { | |
647 // Expansion won't help. Are there enough free regions if we get rid | |
648 // of reservations? | |
649 size_t avail = free_regions(); | |
650 if (avail >= num_regions) { | |
651 res = _hrs->obj_allocate(word_size); | |
652 if (res != NULL) { | |
653 remove_allocated_regions_from_lists(); | |
654 eliminated_allocated_from_lists = true; | |
655 } | |
656 } | |
657 } | |
658 } | |
659 if (res != NULL) { | |
660 // Increment by the number of regions allocated. | |
661 // FIXME: Assumes regions all of size GrainBytes. | |
662 #ifndef PRODUCT | |
663 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * | |
664 HeapRegion::GrainWords)); | |
665 #endif | |
666 if (!eliminated_allocated_from_lists) | |
667 remove_allocated_regions_from_lists(); | |
668 _summary_bytes_used += word_size * HeapWordSize; | |
669 _free_regions -= num_regions; | |
670 _num_humongous_regions += (int) num_regions; | |
671 } | |
672 assert(regions_accounted_for(), "Region Leakage"); | |
673 return res; | |
674 } | |
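For the num_regions computation above, a short worked example; the 1 MB region size and 8-byte HeapWords are assumptions for illustration:

```cpp
// round_to(word_size, GrainWords) / GrainWords, worked through:
const size_t GrainWords = (1u << 20) / 8;        // 131072 words per 1 MB region
const size_t word_size  = 327680;                // a 2.5 MB humongous object
const size_t num_regions =
    (word_size + GrainWords - 1) / GrainWords;   // rounds 2.5 up to 3 regions
```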
675 | |
676 HeapWord* | |
677 G1CollectedHeap::attempt_allocation_slow(size_t word_size, | |
678 bool permit_collection_pause) { | |
679 HeapWord* res = NULL; | |
680 HeapRegion* allocated_young_region = NULL; | |
681 | |
682 assert( SafepointSynchronize::is_at_safepoint() || | |
683 Heap_lock->owned_by_self(), "pre condition of the call" ); | |
684 | |
685 if (isHumongous(word_size)) { | |
686 // Allocation of a humongous object can, in a sense, complete a | |
687 // partial region, if the previous alloc was also humongous, and | |
688 // caused the test below to succeed. | |
689 if (permit_collection_pause) | |
690 do_collection_pause_if_appropriate(word_size); | |
691 res = humongousObjAllocate(word_size); | |
692 assert(_cur_alloc_region == NULL | |
693 || !_cur_alloc_region->isHumongous(), | |
694 "Prevent a regression of this bug."); | |
695 | |
696 } else { | |
354 | 697 // We may have concurrent cleanup working at the time. Wait for it |
698 // to complete. In the future we would probably want to make the |
699 // concurrent cleanup truly concurrent by decoupling it from the |
700 // allocation. |
701 if (!SafepointSynchronize::is_at_safepoint()) |
702 wait_for_cleanup_complete(); |
342 | 703 // If we do a collection pause, this will be reset to a non-NULL |
704 // value. If we don't, nulling here ensures that we allocate a new | |
705 // region below. | |
706 if (_cur_alloc_region != NULL) { | |
707 // We're finished with the _cur_alloc_region. | |
708 _summary_bytes_used += _cur_alloc_region->used(); | |
709 _cur_alloc_region = NULL; | |
710 } | |
711 assert(_cur_alloc_region == NULL, "Invariant."); | |
712 // Completion of a heap region is perhaps a good point at which to do | |
713 // a collection pause. | |
714 if (permit_collection_pause) | |
715 do_collection_pause_if_appropriate(word_size); | |
716 // Make sure we have an allocation region available. | |
717 if (_cur_alloc_region == NULL) { | |
718 if (!SafepointSynchronize::is_at_safepoint()) | |
719 wait_for_cleanup_complete(); | |
720 bool next_is_young = should_set_young_locked(); | |
721 // If the next region is not young, make sure it's zero-filled. | |
722 _cur_alloc_region = newAllocRegion(word_size, !next_is_young); | |
723 if (_cur_alloc_region != NULL) { | |
724 _summary_bytes_used -= _cur_alloc_region->used(); | |
725 if (next_is_young) { | |
726 set_region_short_lived_locked(_cur_alloc_region); | |
727 allocated_young_region = _cur_alloc_region; | |
728 } | |
729 } | |
730 } | |
731 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), | |
732 "Prevent a regression of this bug."); | |
733 | |
734 // Now retry the allocation. | |
735 if (_cur_alloc_region != NULL) { | |
736 res = _cur_alloc_region->allocate(word_size); | |
737 } | |
738 } | |
739 | |
740 // NOTE: fails frequently in PRT | |
741 assert(regions_accounted_for(), "Region leakage!"); | |
742 | |
743 if (res != NULL) { | |
744 if (!SafepointSynchronize::is_at_safepoint()) { | |
745 assert( permit_collection_pause, "invariant" ); | |
746 assert( Heap_lock->owned_by_self(), "invariant" ); | |
747 Heap_lock->unlock(); | |
748 } | |
749 | |
750 if (allocated_young_region != NULL) { | |
751 HeapRegion* hr = allocated_young_region; | |
752 HeapWord* bottom = hr->bottom(); | |
753 HeapWord* end = hr->end(); | |
754 MemRegion mr(bottom, end); | |
755 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); | |
756 } | |
757 } | |
758 | |
759 assert( SafepointSynchronize::is_at_safepoint() || | |
760 (res == NULL && Heap_lock->owned_by_self()) || | |
761 (res != NULL && !Heap_lock->owned_by_self()), | |
762 "post condition of the call" ); | |
763 | |
764 return res; | |
765 } | |
766 | |
767 HeapWord* | |
768 G1CollectedHeap::mem_allocate(size_t word_size, | |
769 bool is_noref, | |
770 bool is_tlab, | |
771 bool* gc_overhead_limit_was_exceeded) { | |
772 debug_only(check_for_valid_allocation_state()); | |
773 assert(no_gc_in_progress(), "Allocation during gc not allowed"); | |
774 HeapWord* result = NULL; | |
775 | |
776 // Loop until the allocation is satisfied, | |
777 // or unsatisfied after GC. | |
778 for (int try_count = 1; /* return or throw */; try_count += 1) { | |
779 int gc_count_before; | |
780 { | |
781 Heap_lock->lock(); | |
782 result = attempt_allocation(word_size); | |
783 if (result != NULL) { | |
784 // attempt_allocation should have unlocked the heap lock | |
785 assert(is_in(result), "result not in heap"); | |
786 return result; | |
787 } | |
788 // Read the gc count while the heap lock is held. | |
789 gc_count_before = SharedHeap::heap()->total_collections(); | |
790 Heap_lock->unlock(); | |
791 } | |
792 | |
793 // Create the garbage collection operation... | |
794 VM_G1CollectForAllocation op(word_size, | |
795 gc_count_before); | |
796 | |
797 // ...and get the VM thread to execute it. | |
798 VMThread::execute(&op); | |
799 if (op.prologue_succeeded()) { | |
800 result = op.result(); | |
801 assert(result == NULL || is_in(result), "result not in heap"); | |
802 return result; | |
803 } | |
804 | |
805 // Give a warning if we seem to be looping forever. | |
806 if ((QueuedAllocationWarningCount > 0) && | |
807 (try_count % QueuedAllocationWarningCount == 0)) { | |
808 warning("G1CollectedHeap::mem_allocate_work retries %d times", | |
809 try_count); | |
810 } | |
811 } | |
812 } | |
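A hedged caller-side sketch of this entry point; the surrounding variables are illustrative:

```cpp
bool overhead_limit_exceeded = false;
HeapWord* obj = g1h->mem_allocate(word_size,
                                  false /* is_noref */,
                                  false /* is_tlab  */,
                                  &overhead_limit_exceeded);
if (obj == NULL) {
  // Even the VM-operation GC path could not satisfy the request;
  // it is the caller that raises OutOfMemoryError.
}
```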
813 | |
814 void G1CollectedHeap::abandon_cur_alloc_region() { | |
815 if (_cur_alloc_region != NULL) { | |
816 // We're finished with the _cur_alloc_region. | |
817 if (_cur_alloc_region->is_empty()) { | |
818 _free_regions++; | |
819 free_region(_cur_alloc_region); | |
820 } else { | |
821 _summary_bytes_used += _cur_alloc_region->used(); | |
822 } | |
823 _cur_alloc_region = NULL; | |
824 } | |
825 } | |
826 | |
636 | 827 void G1CollectedHeap::abandon_gc_alloc_regions() { |
828 // first, make sure that the GC alloc region list is empty (it should!) | |
829 assert(_gc_alloc_region_list == NULL, "invariant"); | |
830 release_gc_alloc_regions(true /* totally */); | |
831 } | |
832 | |
342 | 833 class PostMCRemSetClearClosure: public HeapRegionClosure { |
834 ModRefBarrierSet* _mr_bs; | |
835 public: | |
836 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
837 bool doHeapRegion(HeapRegion* r) { | |
838 r->reset_gc_time_stamp(); | |
839 if (r->continuesHumongous()) | |
840 return false; | |
841 HeapRegionRemSet* hrrs = r->rem_set(); | |
842 if (hrrs != NULL) hrrs->clear(); | |
843 // You might think here that we could clear just the cards | |
844 // corresponding to the used region. But no: if we leave a dirty card | |
845 // in a region we might allocate into, then it would prevent that card | |
846 // from being enqueued, and cause it to be missed. | |
847 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
848 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
849 return false; | |
850 } | |
851 }; | |
852 | |
853 | |
854 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
855 ModRefBarrierSet* _mr_bs; | |
856 public: | |
857 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
858 bool doHeapRegion(HeapRegion* r) { | |
859 if (r->continuesHumongous()) return false; | |
860 if (r->used_region().word_size() != 0) { | |
861 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
862 } | |
863 return false; | |
864 } | |
865 }; | |
866 | |
626 | 867 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
868 G1CollectedHeap* _g1h; |
869 UpdateRSOopClosure _cl; |
870 int _worker_i; |
871 public: |
872 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
873 _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i), |
874 _worker_i(worker_i), |
875 _g1h(g1) |
876 { } |
877 bool doHeapRegion(HeapRegion* r) { |
878 if (!r->continuesHumongous()) { |
879 _cl.set_from(r); |
880 r->oop_iterate(&_cl); |
881 } |
882 return false; |
883 } |
884 }; |
885 |
886 class ParRebuildRSTask: public AbstractGangTask { |
887 G1CollectedHeap* _g1; |
888 public: |
889 ParRebuildRSTask(G1CollectedHeap* g1) |
890 : AbstractGangTask("ParRebuildRSTask"), |
891 _g1(g1) |
892 { } |
893 |
894 void work(int i) { |
895 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
896 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
897 HeapRegion::RebuildRSClaimValue); |
898 } |
899 }; |
900 |
342 | 901 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs, |
902 size_t word_size) { | |
903 ResourceMark rm; | |
904 | |
905 if (full && DisableExplicitGC) { | |
906 gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n"); | |
907 return; | |
908 } | |
909 | |
910 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
911 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
912 | |
913 if (GC_locker::is_active()) { | |
914 return; // GC is disabled (e.g. JNI GetXXXCritical operation) | |
915 } | |
916 | |
917 { | |
918 IsGCActiveMark x; | |
919 | |
920 // Timing | |
921 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
922 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
923 TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); | |
924 | |
925 double start = os::elapsedTime(); | |
926 GCOverheadReporter::recordSTWStart(start); | |
927 g1_policy()->record_full_collection_start(); | |
928 | |
929 gc_prologue(true); | |
930 increment_total_collections(); | |
931 | |
932 size_t g1h_prev_used = used(); | |
933 assert(used() == recalculate_used(), "Should be equal"); | |
934 | |
935 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
936 HandleMark hm; // Discard invalid handles created during verification | |
937 prepare_for_verify(); | |
938 gclog_or_tty->print(" VerifyBeforeGC:"); | |
939 Universe::verify(true); | |
940 } | |
941 assert(regions_accounted_for(), "Region leakage!"); | |
942 | |
943 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
944 | |
945 // We want to discover references, but not process them yet. | |
946 // This mode is disabled in | |
947 // instanceRefKlass::process_discovered_references if the | |
948 // generation does some collection work, or | |
949 // instanceRefKlass::enqueue_discovered_references if the | |
950 // generation returns without doing any work. | |
951 ref_processor()->disable_discovery(); | |
952 ref_processor()->abandon_partial_discovery(); | |
953 ref_processor()->verify_no_references_recorded(); | |
954 | |
955 // Abandon current iterations of concurrent marking and concurrent | |
956 // refinement, if any are in progress. | |
957 concurrent_mark()->abort(); | |
958 | |
959 // Make sure we'll choose a new allocation region afterwards. | |
960 abandon_cur_alloc_region(); | |
636 | 961 abandon_gc_alloc_regions(); |
342 | 962 assert(_cur_alloc_region == NULL, "Invariant."); |
963 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); | |
964 tear_down_region_lists(); | |
965 set_used_regions_to_need_zero_fill(); | |
966 if (g1_policy()->in_young_gc_mode()) { | |
967 empty_young_list(); | |
968 g1_policy()->set_full_young_gcs(true); | |
969 } | |
970 | |
971 // Temporarily make reference _discovery_ single threaded (non-MT). | |
972 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); | |
973 | |
974 // Temporarily make refs discovery atomic | |
975 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
976 | |
977 // Temporarily clear _is_alive_non_header | |
978 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
979 | |
980 ref_processor()->enable_discovery(); | |
457 | 981 ref_processor()->setup_policy(clear_all_soft_refs); |
342 | 982 |
983 // Do collection work | |
984 { | |
985 HandleMark hm; // Discard invalid handles created during gc | |
986 G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); | |
987 } | |
988 // Because freeing humongous regions may have added some unclean | |
989 // regions, it is necessary to tear down again before rebuilding. | |
990 tear_down_region_lists(); | |
991 rebuild_region_lists(); | |
992 | |
993 _summary_bytes_used = recalculate_used(); | |
994 | |
995 ref_processor()->enqueue_discovered_references(); | |
996 | |
997 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
998 | |
999 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { | |
1000 HandleMark hm; // Discard invalid handles created during verification | |
1001 gclog_or_tty->print(" VerifyAfterGC:"); | |
637 | 1002 prepare_for_verify(); |
342 | 1003 Universe::verify(false); |
1004 } | |
1005 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
1006 | |
1007 reset_gc_time_stamp(); | |
1008 // Since everything potentially moved, we will clear all remembered | |
626 | 1009 // sets, and clear all cards. Later we will rebuild remembered |
1010 // sets. We will also reset the GC time stamps of the regions. |
342 | 1011 PostMCRemSetClearClosure rs_clear(mr_bs()); |
1012 heap_region_iterate(&rs_clear); | |
1013 | |
1014 // Resize the heap if necessary. | |
1015 resize_if_necessary_after_full_collection(full ? 0 : word_size); | |
1016 | |
1017 if (_cg1r->use_cache()) { | |
1018 _cg1r->clear_and_record_card_counts(); | |
1019 _cg1r->clear_hot_cache(); | |
1020 } | |
1021 | |
626 | 1022 // Rebuild remembered sets of all regions. |
1023 if (ParallelGCThreads > 0) { |
1024 ParRebuildRSTask rebuild_rs_task(this); |
1025 assert(check_heap_region_claim_values( |
1026 HeapRegion::InitialClaimValue), "sanity check"); |
1027 set_par_threads(workers()->total_workers()); |
1028 workers()->run_task(&rebuild_rs_task); |
1029 set_par_threads(0); |
1030 assert(check_heap_region_claim_values( |
1031 HeapRegion::RebuildRSClaimValue), "sanity check"); |
1032 reset_heap_region_claim_values(); |
1033 } else { |
1034 RebuildRSOutOfRegionClosure rebuild_rs(this); |
1035 heap_region_iterate(&rebuild_rs); |
1036 } |
1037 |
342 | 1038 if (PrintGC) { |
1039 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
1040 } | |
1041 | |
1042 if (true) { // FIXME | |
1043 // Ask the permanent generation to adjust size for full collections | |
1044 perm()->compute_new_size(); | |
1045 } | |
1046 | |
1047 double end = os::elapsedTime(); | |
1048 GCOverheadReporter::recordSTWEnd(end); | |
1049 g1_policy()->record_full_collection_end(); | |
1050 | |
546 | 1051 #ifdef TRACESPINNING |
1052 ParallelTaskTerminator::print_termination_counts(); |
1053 #endif |
1054 |
342 | 1055 gc_epilogue(true); |
1056 | |
794 | 1057 // Discard all rset updates |
1058 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616 | 1059 assert(!G1DeferredRSUpdate |
1060 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
342 | 1061 assert(regions_accounted_for(), "Region leakage!"); |
1062 } | |
1063 | |
1064 if (g1_policy()->in_young_gc_mode()) { | |
1065 _young_list->reset_sampled_info(); | |
1066 assert( check_young_list_empty(false, false), | |
1067 "young list should be empty at this point"); | |
1068 } | |
1069 } | |
1070 | |
1071 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1072 do_collection(true, clear_all_soft_refs, 0); | |
1073 } | |
1074 | |
1075 // This code is mostly copied from TenuredGeneration. | |
1076 void | |
1077 G1CollectedHeap:: | |
1078 resize_if_necessary_after_full_collection(size_t word_size) { | |
1079 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1080 | |
1081 // Include the current allocation, if any, and bytes that will be | |
1082 // pre-allocated to support collections, as "used". | |
1083 const size_t used_after_gc = used(); | |
1084 const size_t capacity_after_gc = capacity(); | |
1085 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1086 | |
1087 // We don't have floating point command-line arguments | |
1088 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100; | |
1089 const double maximum_used_percentage = 1.0 - minimum_free_percentage; | |
1090 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; | |
1091 const double minimum_used_percentage = 1.0 - maximum_free_percentage; | |
1092 | |
1093 size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage); | |
1094 size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage); | |
1095 | |
1096 // Don't shrink less than the initial size. | |
1097 minimum_desired_capacity = | |
1098 MAX2(minimum_desired_capacity, | |
1099 collector_policy()->initial_heap_byte_size()); | |
1100 maximum_desired_capacity = | |
1101 MAX2(maximum_desired_capacity, | |
1102 collector_policy()->initial_heap_byte_size()); | |
1103 | |
1104 // We are failing here because minimum_desired_capacity is | |
1105 assert(used_after_gc <= minimum_desired_capacity, "sanity check"); | |
1106 assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check"); | |
1107 | |
1108 if (PrintGC && Verbose) { | |
1109 const double free_percentage = ((double)free_after_gc) / capacity(); | |
1110 gclog_or_tty->print_cr("Computing new size after full GC "); | |
1111 gclog_or_tty->print_cr(" " | |
1112 " minimum_free_percentage: %6.2f", | |
1113 minimum_free_percentage); | |
1114 gclog_or_tty->print_cr(" " | |
1115 " maximum_free_percentage: %6.2f", | |
1116 maximum_free_percentage); | |
1117 gclog_or_tty->print_cr(" " | |
1118 " capacity: %6.1fK" | |
1119 " minimum_desired_capacity: %6.1fK" | |
1120 " maximum_desired_capacity: %6.1fK", | |
1121 capacity() / (double) K, | |
1122 minimum_desired_capacity / (double) K, | |
1123 maximum_desired_capacity / (double) K); | |
1124 gclog_or_tty->print_cr(" " | |
1125 " free_after_gc : %6.1fK" | |
1126 " used_after_gc : %6.1fK", | |
1127 free_after_gc / (double) K, | |
1128 used_after_gc / (double) K); | |
1129 gclog_or_tty->print_cr(" " | |
1130 " free_percentage: %6.2f", | |
1131 free_percentage); | |
1132 } | |
1133 if (capacity() < minimum_desired_capacity) { | |
1134 // Don't expand unless it's significant | |
1135 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
1136 expand(expand_bytes); | |
1137 if (PrintGC && Verbose) { | |
1138 gclog_or_tty->print_cr(" expanding:" | |
1139 " minimum_desired_capacity: %6.1fK" | |
1140 " expand_bytes: %6.1fK", | |
1141 minimum_desired_capacity / (double) K, | |
1142 expand_bytes / (double) K); | |
1143 } | |
1144 | |
1145 // No expansion, now see if we want to shrink | |
1146 } else if (capacity() > maximum_desired_capacity) { | |
1147 // Capacity too large, compute shrinking size | |
1148 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1149 shrink(shrink_bytes); | |
1150 if (PrintGC && Verbose) { | |
1151 gclog_or_tty->print_cr(" " | |
1152 " shrinking:" | |
1153 " initSize: %.1fK" | |
1154 " maximum_desired_capacity: %.1fK", | |
1155 collector_policy()->initial_heap_byte_size() / (double) K, | |
1156 maximum_desired_capacity / (double) K); | |
1157 gclog_or_tty->print_cr(" " | |
1158 " shrink_bytes: %.1fK", | |
1159 shrink_bytes / (double) K); | |
1160 } | |
1161 } | |
1162 } | |
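
The free-ratio arithmetic above is easier to verify with concrete numbers. A minimal standalone sketch in plain C++; the flag values and the 600M live-data figure are illustrative assumptions, not values taken from this file:

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t used_after_gc    = 600 * 1024 * 1024;  // assume 600M live after full GC
      const size_t MinHeapFreeRatio = 40;                 // assumed flag value
      const size_t MaxHeapFreeRatio = 70;                 // assumed flag value

      const double minimum_free_percentage = (double) MinHeapFreeRatio / 100;  // 0.40
      const double maximum_used_percentage = 1.0 - minimum_free_percentage;    // 0.60
      const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;  // 0.70
      const double minimum_used_percentage = 1.0 - maximum_free_percentage;    // 0.30

      // Live data may fill at most 60% of the heap: capacity of roughly 1000M...
      size_t minimum_desired = (size_t) (used_after_gc / maximum_used_percentage);
      // ...and at least 30% of the heap: capacity of roughly 2000M.
      size_t maximum_desired = (size_t) (used_after_gc / minimum_used_percentage);

      printf("desired capacity in [%zuM, %zuM]\n",
             minimum_desired / (1024 * 1024), maximum_desired / (1024 * 1024));
      return 0;
    }

Under these assumptions the heap is steered into a [1000M, 2000M] band, which is exactly the band the expand/shrink branches above act on.
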
1163 | |
1164 | |
1165 HeapWord* | |
1166 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { | |
1167 HeapWord* result = NULL; | |
1168 | |
1169 // In a G1 heap, we're supposed to keep allocation from failing by | |
1170 // incremental pauses. Therefore, at least for now, we'll favor | |
1171 // expansion over collection. (This might change in the future if we can | |
1172 // do something smarter than full collection to satisfy a failed alloc.) | |
1173 | |
1174 result = expand_and_allocate(word_size); | |
1175 if (result != NULL) { | |
1176 assert(is_in(result), "result not in heap"); | |
1177 return result; | |
1178 } | |
1179 | |
1180 // OK, I guess we have to try collection. | |
1181 | |
1182 do_collection(false, false, word_size); | |
1183 | |
1184 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1185 | |
1186 if (result != NULL) { | |
1187 assert(is_in(result), "result not in heap"); | |
1188 return result; | |
1189 } | |
1190 | |
1191 // Try collecting soft references. | |
1192 do_collection(false, true, word_size); | |
1193 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1194 if (result != NULL) { | |
1195 assert(is_in(result), "result not in heap"); | |
1196 return result; | |
1197 } | |
1198 | |
1199 // What else? We might try synchronous finalization later. If the total | |
1200 // space available is large enough for the allocation, then a more | |
1201 // complete compaction phase than we've tried so far might be | |
1202 // appropriate. | |
1203 return NULL; | |
1204 } | |
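
satisfy_failed_allocation escalates through progressively more expensive remedies: expand first, then a full collection that keeps soft references, then one that clears them. A schematic of that fallback shape, with hypothetical stand-in helpers (not the HotSpot API):

    #include <cstddef>

    // Hypothetical stand-ins for expand_and_allocate / do_collection /
    // attempt_allocation, stubbed so the sketch is self-contained.
    static void* try_expand_and_alloc(size_t) { return NULL; }
    static void  full_collect(bool /* clear_all_soft_refs */) {}
    static void* try_alloc(size_t) { return NULL; }

    static void* allocate_with_fallback(size_t word_size) {
      if (void* p = try_expand_and_alloc(word_size)) return p;  // cheapest remedy
      full_collect(false);                          // full GC, keep soft references
      if (void* p = try_alloc(word_size)) return p;
      full_collect(true);                           // clear soft references: last resort
      return try_alloc(word_size);                  // may still be NULL
    }

    int main() { return allocate_with_fallback(128) == NULL ? 0 : 1; }

Each step runs only if the previous one failed, so the soft-ref-clearing collection happens as rarely as possible.
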
1205 | |
1206 // Attempts to expand the heap sufficiently | |
1207 // to support an allocation of the given "word_size". If | |
1208 // successful, performs the allocation and returns the address of the | |
1209 // allocated block, or else "NULL". | |
1210 | |
1211 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
1212 size_t expand_bytes = word_size * HeapWordSize; | |
1213 if (expand_bytes < MinHeapDeltaBytes) { | |
1214 expand_bytes = MinHeapDeltaBytes; | |
1215 } | |
1216 expand(expand_bytes); | |
1217 assert(regions_accounted_for(), "Region leakage!"); | |
1218 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); | |
1219 return result; | |
1220 } | |
1221 | |
1222 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { | |
1223 size_t pre_used = 0; | |
1224 size_t cleared_h_regions = 0; | |
1225 size_t freed_regions = 0; | |
1226 UncleanRegionList local_list; | |
1227 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, | |
1228 freed_regions, &local_list); | |
1229 | |
1230 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
1231 &local_list); | |
1232 return pre_used; | |
1233 } | |
1234 | |
1235 void | |
1236 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, | |
1237 size_t& pre_used, | |
1238 size_t& cleared_h, | |
1239 size_t& freed_regions, | |
1240 UncleanRegionList* list, | |
1241 bool par) { | |
1242 assert(!hr->continuesHumongous(), "should have filtered these out"); | |
1243 size_t res = 0; | |
677 | 1244 if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && |
1245 !hr->is_young()) { | |
1246 if (G1PolicyVerbose > 0) | |
1247 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" | |
1248 " during cleanup", hr, hr->used()); | |
1249 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); | |
342 | 1250 } |
1251 } | |
1252 | |
1253 // FIXME: both this and shrink could probably be more efficient by | |
1254 // doing one "VirtualSpace::expand_by" call rather than several. | |
1255 void G1CollectedHeap::expand(size_t expand_bytes) { | |
1256 size_t old_mem_size = _g1_storage.committed_size(); | |
1257 // We expand by a minimum of 1K. | |
1258 expand_bytes = MAX2(expand_bytes, (size_t)K); | |
1259 size_t aligned_expand_bytes = | |
1260 ReservedSpace::page_align_size_up(expand_bytes); | |
1261 aligned_expand_bytes = align_size_up(aligned_expand_bytes, | |
1262 HeapRegion::GrainBytes); | |
1263 expand_bytes = aligned_expand_bytes; | |
1264 while (expand_bytes > 0) { | |
1265 HeapWord* base = (HeapWord*)_g1_storage.high(); | |
1266 // Commit more storage. | |
1267 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); | |
1268 if (!successful) { | |
1269 expand_bytes = 0; | |
1270 } else { | |
1271 expand_bytes -= HeapRegion::GrainBytes; | |
1272 // Expand the committed region. | |
1273 HeapWord* high = (HeapWord*) _g1_storage.high(); | |
1274 _g1_committed.set_end(high); | |
1275 // Create a new HeapRegion. | |
1276 MemRegion mr(base, high); | |
1277 bool is_zeroed = !_g1_max_committed.contains(base); | |
1278 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); | |
1279 | |
1280 // Now update max_committed if necessary. | |
1281 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); | |
1282 | |
1283 // Add it to the HeapRegionSeq. | |
1284 _hrs->insert(hr); | |
1285 // Set the zero-fill state, according to whether it's already | |
1286 // zeroed. | |
1287 { | |
1288 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
1289 if (is_zeroed) { | |
1290 hr->set_zero_fill_complete(); | |
1291 put_free_region_on_list_locked(hr); | |
1292 } else { | |
1293 hr->set_zero_fill_needed(); | |
1294 put_region_on_unclean_list_locked(hr); | |
1295 } | |
1296 } | |
1297 _free_regions++; | |
1298 // And we used up an expansion region to create it. | |
1299 _expansion_regions--; | |
1300 // Tell the cardtable about it. | |
1301 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1302 // And the offset table as well. | |
1303 _bot_shared->resize(_g1_committed.word_size()); | |
1304 } | |
1305 } | |
1306 if (Verbose && PrintGC) { | |
1307 size_t new_mem_size = _g1_storage.committed_size(); | |
1308 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", | |
1309 old_mem_size/K, aligned_expand_bytes/K, | |
1310 new_mem_size/K); | |
1311 } | |
1312 } | |
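
The expansion request above is rounded up twice, first to the OS page size and then to HeapRegion::GrainBytes, before being committed one region at a time. A quick standalone check of that rounding; the 4K page and 1M region sizes are assumptions for the example:

    #include <cstdio>
    #include <cstddef>

    // Power-of-two round-up, matching the effect of
    // ReservedSpace::page_align_size_up / align_size_up above.
    static size_t align_up(size_t v, size_t alignment) {
      return (v + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t page  = 4 * 1024;      // assumed OS page size
      const size_t grain = 1024 * 1024;   // assumed HeapRegion::GrainBytes
      size_t request = 300 * 1024;        // caller asks to expand by 300K
      size_t aligned = align_up(align_up(request, page), grain);
      printf("%zuK -> %zuK\n", request / 1024, aligned / 1024);  // 300K -> 1024K
      return 0;
    }
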
1313 | |
1314 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) | |
1315 { | |
1316 size_t old_mem_size = _g1_storage.committed_size(); | |
1317 size_t aligned_shrink_bytes = | |
1318 ReservedSpace::page_align_size_down(shrink_bytes); | |
1319 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1320 HeapRegion::GrainBytes); | |
1321 size_t num_regions_deleted = 0; | |
1322 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); | |
1323 | |
1324 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1325 if (mr.byte_size() > 0) | |
1326 _g1_storage.shrink_by(mr.byte_size()); | |
1327 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1328 | |
1329 _g1_committed.set_end(mr.start()); | |
1330 _free_regions -= num_regions_deleted; | |
1331 _expansion_regions += num_regions_deleted; | |
1332 | |
1333 // Tell the cardtable about it. | |
1334 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1335 | |
1336 // And the offset table as well. | |
1337 _bot_shared->resize(_g1_committed.word_size()); | |
1338 | |
1339 HeapRegionRemSet::shrink_heap(n_regions()); | |
1340 | |
1341 if (Verbose && PrintGC) { | |
1342 size_t new_mem_size = _g1_storage.committed_size(); | |
1343 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1344 old_mem_size/K, aligned_shrink_bytes/K, | |
1345 new_mem_size/K); | |
1346 } | |
1347 } | |
1348 | |
1349 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
636 | 1350 release_gc_alloc_regions(true /* totally */); |
342 | 1351 tear_down_region_lists(); // We will rebuild them in a moment. |
1352 shrink_helper(shrink_bytes); | |
1353 rebuild_region_lists(); | |
1354 } | |
1355 | |
1356 // Public methods. | |
1357 | |
1358 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1359 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1360 #endif // _MSC_VER | |
1361 | |
1362 | |
1363 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1364 SharedHeap(policy_), | |
1365 _g1_policy(policy_), | |
1366 _ref_processor(NULL), | |
1367 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1368 _bot_shared(NULL), | |
1369 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), | |
1370 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1371 _evac_failure_scan_stack(NULL) , | |
1372 _mark_in_progress(false), | |
1373 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), | |
1374 _cur_alloc_region(NULL), | |
1375 _refine_cte_cl(NULL), | |
1376 _free_region_list(NULL), _free_region_list_size(0), | |
1377 _free_regions(0), | |
1378 _full_collection(false), | |
1379 _unclean_region_list(), | |
1380 _unclean_regions_coming(false), | |
1381 _young_list(new YoungList(this)), | |
1382 _gc_time_stamp(0), | |
526 | 1383 _surviving_young_words(NULL), |
1384 _in_cset_fast_test(NULL), | |
796 | 1385 _in_cset_fast_test_base(NULL), |
1386 _dirty_cards_region_list(NULL) { | |
342 | 1387 _g1h = this; // To catch bugs. |
1388 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1389 vm_exit_during_initialization("Failed necessary allocation."); | |
1390 } | |
1391 int n_queues = MAX2((int)ParallelGCThreads, 1); | |
1392 _task_queues = new RefToScanQueueSet(n_queues); | |
1393 | |
1394 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1395 assert(n_rem_sets > 0, "Invariant."); | |
1396 | |
1397 HeapRegionRemSetIterator** iter_arr = | |
1398 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1399 for (int i = 0; i < n_queues; i++) { | |
1400 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1401 } | |
1402 _rem_set_iterator = iter_arr; | |
1403 | |
1404 for (int i = 0; i < n_queues; i++) { | |
1405 RefToScanQueue* q = new RefToScanQueue(); | |
1406 q->initialize(); | |
1407 _task_queues->register_queue(i, q); | |
1408 } | |
1409 | |
1410 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
636 | 1411 _gc_alloc_regions[ap] = NULL; |
1412 _gc_alloc_region_counts[ap] = 0; | |
1413 _retained_gc_alloc_regions[ap] = NULL; | |
1414 // by default, we do not retain a GC alloc region for each ap; | |
1415 // we'll override this, when appropriate, below | |
1416 _retain_gc_alloc_region[ap] = false; | |
1417 } | |
1418 | |
1419 // We will try to remember the last half-full tenured region we | |
1420 // allocated to at the end of a collection so that we can re-use it | |
1421 // during the next collection. | |
1422 _retain_gc_alloc_region[GCAllocForTenured] = true; | |
1423 | |
342 | 1424 guarantee(_task_queues != NULL, "task_queues allocation failure."); |
1425 } | |
1426 | |
1427 jint G1CollectedHeap::initialize() { | |
1428 os::enable_vtime(); | |
1429 | |
1430 // Necessary to satisfy locking discipline assertions. | |
1431 | |
1432 MutexLocker x(Heap_lock); | |
1433 | |
1434 // While there are no constraints in the GC code that HeapWordSize | |
1435 // be any particular value, there are multiple other areas in the | |
1436 // system which believe this to be true (e.g. oop->object_size in some | |
1437 // cases incorrectly returns the size in wordSize units rather than | |
1438 // HeapWordSize). | |
1439 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1440 | |
1441 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1442 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1443 | |
1444 // Ensure that the sizes are properly aligned. | |
1445 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1446 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1447 | |
1448 // We allocate this in any case, but it does no work if the command line | |
1449 // param is off. | |
1450 _cg1r = new ConcurrentG1Refine(); | |
1451 | |
1452 // Reserve the maximum. | |
1453 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1454 // Includes the perm-gen. | |
642 | 1455 |
1456 const size_t total_reserved = max_byte_size + pgs->max_size(); | |
1457 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); | |
1458 | |
342 | 1459 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1460 HeapRegion::GrainBytes, | |
642 | 1461 false /*ism*/, addr); |
1462 | |
1463 if (UseCompressedOops) { | |
1464 if (addr != NULL && !heap_rs.is_reserved()) { | |
1465 // Failed to reserve at the specified address - the requested memory | |
1466 // region is taken already, for example, by 'java' launcher. | |
1467 // Try again to reserve the heap higher. | |
1468 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); | |
1469 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, | |
1470 false /*ism*/, addr); | |
1471 if (addr != NULL && !heap_rs0.is_reserved()) { | |
1472 // Failed to reserve at the specified address again - give up. | |
1473 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); | |
1474 assert(addr == NULL, ""); | |
1475 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, | |
1476 false /*ism*/, addr); | |
1477 heap_rs = heap_rs1; | |
1478 } else { | |
1479 heap_rs = heap_rs0; | |
1480 } | |
1481 } | |
1482 } | |
342 | 1483 |
1484 if (!heap_rs.is_reserved()) { | |
1485 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1486 return JNI_ENOMEM; | |
1487 } | |
1488 | |
1489 // It is important to do this in a way such that concurrent readers can't | |
1490 // temporarily think something is in the heap. (I've actually seen this | |
1491 // happen in asserts: DLD.) | |
1492 _reserved.set_word_size(0); | |
1493 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1494 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1495 | |
1496 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1497 | |
1498 _num_humongous_regions = 0; | |
1499 | |
1500 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1501 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1502 set_barrier_set(rem_set()->bs()); | |
1503 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1504 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1505 } else { | |
1506 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1507 return JNI_ENOMEM; | |
1508 } | |
1509 | |
1510 // Also create a G1 rem set. | |
1511 if (G1UseHRIntoRS) { | |
1512 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { | |
1513 _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
1514 } else { | |
1515 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); | |
1516 return JNI_ENOMEM; | |
1517 } | |
1518 } else { | |
1519 _g1_rem_set = new StupidG1RemSet(this); | |
1520 } | |
1521 | |
1522 // Carve out the G1 part of the heap. | |
1523 | |
1524 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1525 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1526 g1_rs.size()/HeapWordSize); | |
1527 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1528 | |
1529 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1530 | |
1531 _g1_storage.initialize(g1_rs, 0); | |
1532 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1533 _g1_max_committed = _g1_committed; | |
393 | 1534 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1535 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1536 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
1537 | |
807 | 1538 // 6843694 - ensure that the maximum region index can fit |
1539 // in the remembered set structures. | |
1540 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; | |
1541 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); | |
1542 | |
1543 const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift; | |
1544 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; | |
1545 guarantee(cards_per_region < max_cards_per_region, "too many cards per region"); | |
1546 | |
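
The two guarantees above are the core of the 6843694 change: the region and card indexes stored in the sparse remembered set must fit their integer types. Reworking the first expression standalone, with an assumed 16-bit signed RegionIdx_t and 1M regions (both assumptions for illustration; the actual typedefs live elsewhere in the G1 sources), shows where a 32G cap would come from:

    #include <cstdio>
    #include <cstddef>

    typedef short RegionIdx_t;   // assumption for the example: a 16-bit signed index

    int main() {
      const size_t BitsPerByte = 8;
      // Same expression as the guarantee above: the largest value a signed
      // RegionIdx_t can represent.
      const size_t max_region_idx =
          ((size_t)1 << (sizeof(RegionIdx_t) * BitsPerByte - 1)) - 1;    // 32767
      const unsigned long long grain_bytes = 1024 * 1024;                // assumed 1M regions
      unsigned long long max_heap = (max_region_idx + 1) * grain_bytes;  // 2^35 bytes
      printf("max regions = %zu, max addressable heap = %lluG\n",
             max_region_idx + 1, max_heap >> 30);                        // 32768, 32G
      return 0;
    }

Under those assumptions, any heap needing more than 32768 one-megabyte regions would overflow the index, which is exactly what the guarantee rejects at initialization instead of asserting later.
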
342 | 1547 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1548 heap_word_size(init_byte_size)); | |
1549 | |
1550 _g1h = this; | |
1551 | |
1552 // Create the ConcurrentMark data structure and thread. | |
1553 // (Must do this late, so that "max_regions" is defined.) | |
1554 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1555 _cmThread = _cm->cmThread(); | |
1556 | |
1557 // ...and the concurrent zero-fill thread, if necessary. | |
1558 if (G1ConcZeroFill) { | |
1559 _czft = new ConcurrentZFThread(); | |
1560 } | |
1561 | |
1562 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1563 HeapRegionRemSet::init_heap(max_regions()); | |
1564 | |
677 | 1565 // Now expand into the initial heap size. |
1566 expand(init_byte_size); | |
342 | 1567 |
1568 // Perform any initialization actions delegated to the policy. | |
1569 g1_policy()->init(); | |
1570 | |
1571 g1_policy()->note_start_of_mark_thread(); | |
1572 | |
1573 _refine_cte_cl = | |
1574 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1575 g1_rem_set(), | |
1576 concurrent_g1_refine()); | |
1577 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1578 | |
1579 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1580 SATB_Q_FL_lock, | |
1581 0, | |
1582 Shared_SATB_Q_lock); | |
794 | 1583 |
1584 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1585 DirtyCardQ_FL_lock, | |
1586 G1DirtyCardQueueMax, | |
1587 Shared_DirtyCardQ_lock); | |
1588 | |
616 | 1589 if (G1DeferredRSUpdate) { |
1590 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1591 DirtyCardQ_FL_lock, | |
1592 0, | |
1593 Shared_DirtyCardQ_lock, | |
1594 &JavaThread::dirty_card_queue_set()); | |
1595 } | |
342 | 1596 // In case we're keeping closure specialization stats, initialize those |
1597 // counts and that mechanism. | |
1598 SpecializationStats::clear(); | |
1599 | |
1600 _gc_alloc_region_list = NULL; | |
1601 | |
1602 // Do later initialization work for concurrent refinement. | |
1603 _cg1r->init(); | |
1604 | |
1605 const char* group_names[] = { "CR", "ZF", "CM", "CL" }; | |
1606 GCOverheadReporter::initGCOverheadReporter(4, group_names); | |
1607 | |
1608 return JNI_OK; | |
1609 } | |
1610 | |
1611 void G1CollectedHeap::ref_processing_init() { | |
1612 SharedHeap::ref_processing_init(); | |
1613 MemRegion mr = reserved_region(); | |
1614 _ref_processor = ReferenceProcessor::create_ref_processor( | |
1615 mr, // span | |
1616 false, // Reference discovery is not atomic | |
1617 // (though it shouldn't matter here.) | |
1618 true, // mt_discovery | |
1619 NULL, // is alive closure: need to fill this in for efficiency | |
1620 ParallelGCThreads, | |
1621 ParallelRefProcEnabled, | |
1622 true); // Setting next fields of discovered | |
1623 // lists requires a barrier. | |
1624 } | |
1625 | |
1626 size_t G1CollectedHeap::capacity() const { | |
1627 return _g1_committed.byte_size(); | |
1628 } | |
1629 | |
1630 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent, | |
1631 int worker_i) { | |
1632 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
1633 int n_completed_buffers = 0; | |
1634 while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) { | |
1635 n_completed_buffers++; | |
1636 } | |
1637 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
1638 (double) n_completed_buffers); | |
1639 dcqs.clear_n_completed_buffers(); | |
1640 // Finish up the queue... | |
1641 if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i, | |
1642 g1_rem_set()); | |
1643 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
1644 } | |
1645 | |
1646 | |
1647 // Computes the sum of the storage used by the various regions. | |
1648 | |
1649 size_t G1CollectedHeap::used() const { | |
1650 assert(Heap_lock->owner() != NULL, | |
1651 "Should be owned on this thread's behalf."); | |
1652 size_t result = _summary_bytes_used; | |
1653 if (_cur_alloc_region != NULL) | |
1654 result += _cur_alloc_region->used(); | |
1655 return result; | |
1656 } | |
1657 | |
1658 class SumUsedClosure: public HeapRegionClosure { | |
1659 size_t _used; | |
1660 public: | |
1661 SumUsedClosure() : _used(0) {} | |
1662 bool doHeapRegion(HeapRegion* r) { | |
1663 if (!r->continuesHumongous()) { | |
1664 _used += r->used(); | |
1665 } | |
1666 return false; | |
1667 } | |
1668 size_t result() { return _used; } | |
1669 }; | |
1670 | |
1671 size_t G1CollectedHeap::recalculate_used() const { | |
1672 SumUsedClosure blk; | |
1673 _hrs->iterate(&blk); | |
1674 return blk.result(); | |
1675 } | |
1676 | |
1677 #ifndef PRODUCT | |
1678 class SumUsedRegionsClosure: public HeapRegionClosure { | |
1679 size_t _num; | |
1680 public: | |
677 | 1681 SumUsedRegionsClosure() : _num(0) {} |
342 | 1682 bool doHeapRegion(HeapRegion* r) { |
1683 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
1684 _num += 1; | |
1685 } | |
1686 return false; | |
1687 } | |
1688 size_t result() { return _num; } | |
1689 }; | |
1690 | |
1691 size_t G1CollectedHeap::recalculate_used_regions() const { | |
1692 SumUsedRegionsClosure blk; | |
1693 _hrs->iterate(&blk); | |
1694 return blk.result(); | |
1695 } | |
1696 #endif // PRODUCT | |
1697 | |
1698 size_t G1CollectedHeap::unsafe_max_alloc() { | |
1699 if (_free_regions > 0) return HeapRegion::GrainBytes; | |
1700 // otherwise, is there space in the current allocation region? | |
1701 | |
1702 // We need to store the current allocation region in a local variable | |
1703 // here. The problem is that this method doesn't take any locks and | |
1704 // there may be other threads which overwrite the current allocation | |
1705 // region field. attempt_allocation(), for example, sets it to NULL | |
1706 // and this can happen *after* the NULL check here but before the call | |
1707 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
1708 // to be a problem in the optimized build, since the two loads of the | |
1709 // current allocation region field are optimized away. | |
1710 HeapRegion* car = _cur_alloc_region; | |
1711 | |
1712 // FIXME: should iterate over all regions? | |
1713 if (car == NULL) { | |
1714 return 0; | |
1715 } | |
1716 return car->free(); | |
1717 } | |
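
The comment above describes a read-once idiom: snapshot the racy field into a local so the NULL check and the later use see the same value. A minimal sketch of the pattern (type and field names are illustrative, not the HotSpot declarations):

    #include <cstddef>

    struct Region {
      size_t free() const { return 0; }     // placeholder body
    };

    struct Heap {
      Region* volatile _cur_alloc_region;   // may be nulled by another thread

      size_t unsafe_max_alloc() {
        Region* car = _cur_alloc_region;    // one racy load into a local
        if (car == NULL) return 0;          // every later use goes through 'car',
        return car->free();                 // so it cannot turn NULL in between
      }
    };

    int main() {
      Heap h;
      h._cur_alloc_region = NULL;
      return (int) h.unsafe_max_alloc();    // 0; no second load of the field
    }

Reloading _cur_alloc_region after the NULL check is exactly the SIGSEGV the comment warns about; the local copy keeps the two uses consistent.
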
1718 | |
1719 void G1CollectedHeap::collect(GCCause::Cause cause) { | |
1720 // The caller doesn't have the Heap_lock | |
1721 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); | |
1722 MutexLocker ml(Heap_lock); | |
1723 collect_locked(cause); | |
1724 } | |
1725 | |
1726 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { | |
1727 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
1728 assert(Heap_lock->is_locked(), "Precondition#2"); | |
1729 GCCauseSetter gcs(this, cause); | |
1730 switch (cause) { | |
1731 case GCCause::_heap_inspection: | |
1732 case GCCause::_heap_dump: { | |
1733 HandleMark hm; | |
1734 do_full_collection(false); // don't clear all soft refs | |
1735 break; | |
1736 } | |
1737 default: // XXX FIX ME | |
1738 ShouldNotReachHere(); // Unexpected use of this function | |
1739 } | |
1740 } | |
1741 | |
1742 | |
1743 void G1CollectedHeap::collect_locked(GCCause::Cause cause) { | |
1744 // Don't want to do a GC until cleanup is completed. | |
1745 wait_for_cleanup_complete(); | |
1746 | |
1747 // Read the GC count while holding the Heap_lock | |
1748 int gc_count_before = SharedHeap::heap()->total_collections(); | |
1749 { | |
1750 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
1751 VM_G1CollectFull op(gc_count_before, cause); | |
1752 VMThread::execute(&op); | |
1753 } | |
1754 } | |
1755 | |
1756 bool G1CollectedHeap::is_in(const void* p) const { | |
1757 if (_g1_committed.contains(p)) { | |
1758 HeapRegion* hr = _hrs->addr_to_region(p); | |
1759 return hr->is_in(p); | |
1760 } else { | |
1761 return _perm_gen->as_gen()->is_in(p); | |
1762 } | |
1763 } | |
1764 | |
1765 // Iteration functions. | |
1766 | |
1767 // Iterates an OopClosure over all ref-containing fields of objects | |
1768 // within a HeapRegion. | |
1769 | |
1770 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
1771 MemRegion _mr; | |
1772 OopClosure* _cl; | |
1773 public: | |
1774 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
1775 : _mr(mr), _cl(cl) {} | |
1776 bool doHeapRegion(HeapRegion* r) { | |
1777 if (! r->continuesHumongous()) { | |
1778 r->oop_iterate(_cl); | |
1779 } | |
1780 return false; | |
1781 } | |
1782 }; | |
1783 | |
678 | 1784 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 1785 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
1786 _hrs->iterate(&blk); | |
678 | 1787 if (do_perm) { |
1788 perm_gen()->oop_iterate(cl); | |
1789 } | |
342 | 1790 } |
1791 | |
678 | 1792 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 1793 IterateOopClosureRegionClosure blk(mr, cl); |
1794 _hrs->iterate(&blk); | |
678 | 1795 if (do_perm) { |
1796 perm_gen()->oop_iterate(cl); | |
1797 } | |
342 | 1798 } |
1799 | |
1800 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
1801 | |
1802 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
1803 ObjectClosure* _cl; | |
1804 public: | |
1805 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
1806 bool doHeapRegion(HeapRegion* r) { | |
1807 if (! r->continuesHumongous()) { | |
1808 r->object_iterate(_cl); | |
1809 } | |
1810 return false; | |
1811 } | |
1812 }; | |
1813 | |
678 | 1814 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 1815 IterateObjectClosureRegionClosure blk(cl); |
1816 _hrs->iterate(&blk); | |
678 | 1817 if (do_perm) { |
1818 perm_gen()->object_iterate(cl); | |
1819 } | |
342 | 1820 } |
1821 | |
1822 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
1823 // FIXME: is this right? | |
1824 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
1825 } | |
1826 | |
1827 // Calls a SpaceClosure on a HeapRegion. | |
1828 | |
1829 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
1830 SpaceClosure* _cl; | |
1831 public: | |
1832 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
1833 bool doHeapRegion(HeapRegion* r) { | |
1834 _cl->do_space(r); | |
1835 return false; | |
1836 } | |
1837 }; | |
1838 | |
1839 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
1840 SpaceClosureRegionClosure blk(cl); | |
1841 _hrs->iterate(&blk); | |
1842 } | |
1843 | |
1844 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { | |
1845 _hrs->iterate(cl); | |
1846 } | |
1847 | |
1848 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
1849 HeapRegionClosure* cl) { | |
1850 _hrs->iterate_from(r, cl); | |
1851 } | |
1852 | |
1853 void | |
1854 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { | |
1855 _hrs->iterate_from(idx, cl); | |
1856 } | |
1857 | |
1858 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } | |
1859 | |
1860 void | |
1861 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
1862 int worker, | |
1863 jint claim_value) { | |
355 | 1864 const size_t regions = n_regions(); |
1865 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); | |
1866 // try to spread out the starting points of the workers | |
1867 const size_t start_index = regions / worker_num * (size_t) worker; | |
1868 | |
1869 // each worker will actually look at all regions | |
1870 for (size_t count = 0; count < regions; ++count) { | |
1871 const size_t index = (start_index + count) % regions; | |
1872 assert(0 <= index && index < regions, "sanity"); | |
1873 HeapRegion* r = region_at(index); | |
1874 // we'll ignore "continues humongous" regions (we'll process them | |
1875 // when we come across their corresponding "start humongous" | |
1876 // region) and regions already claimed | |
1877 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
1878 continue; | |
1879 } | |
1880 // OK, try to claim it | |
342 | 1881 if (r->claimHeapRegion(claim_value)) { |
355 | 1882 // success! |
1883 assert(!r->continuesHumongous(), "sanity"); | |
1884 if (r->startsHumongous()) { | |
1885 // If the region is "starts humongous" we'll iterate over its | |
1886 // "continues humongous" first; in fact we'll do them | |
1887 // first. The order is important. In one case, calling the | |
1888 // closure on the "starts humongous" region might de-allocate | |
1889 // and clear all its "continues humongous" regions and, as a | |
1890 // result, we might end up processing them twice. So, we'll do | |
1891 // them first (notice: most closures will ignore them anyway) and | |
1892 // then we'll do the "starts humongous" region. | |
1893 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
1894 HeapRegion* chr = region_at(ch_index); | |
1895 | |
1896 // if the region has already been claimed or it's not | |
1897 // "continues humongous" we're done | |
1898 if (chr->claim_value() == claim_value || | |
1899 !chr->continuesHumongous()) { | |
1900 break; | |
1901 } | |
1902 | |
1903 // No one should have claimed it directly. We can assert this, given | |
1904 // that we claimed its "starts humongous" region. | |
1905 assert(chr->claim_value() != claim_value, "sanity"); | |
1906 assert(chr->humongous_start_region() == r, "sanity"); | |
1907 | |
1908 if (chr->claimHeapRegion(claim_value)) { | |
1909 // we should always be able to claim it; no one else should | |
1910 // be trying to claim this region | |
1911 | |
1912 bool res2 = cl->doHeapRegion(chr); | |
1913 assert(!res2, "Should not abort"); | |
1914 | |
1915 // Right now, this holds (i.e., no closure that actually | |
1916 // does something with "continues humongous" regions | |
1917 // clears them). We might have to weaken it in the future, | |
1918 // but let's leave these two asserts here for extra safety. | |
1919 assert(chr->continuesHumongous(), "should still be the case"); | |
1920 assert(chr->humongous_start_region() == r, "sanity"); | |
1921 } else { | |
1922 guarantee(false, "we should not reach here"); | |
1923 } | |
1924 } | |
1925 } | |
1926 | |
1927 assert(!r->continuesHumongous(), "sanity"); | |
1928 bool res = cl->doHeapRegion(r); | |
1929 assert(!res, "Should not abort"); | |
1930 } | |
1931 } | |
1932 } | |
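
Each worker gets a different starting offset and then walks all regions modulo the region count, so the initial claim attempts are spread across the heap instead of contending at region 0. The offset arithmetic, with assumed counts:

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t regions = 100, workers = 4;        // assumed counts
      for (size_t w = 0; w < workers; ++w) {
        size_t start_index = regions / workers * w;   // 0, 25, 50, 75
        printf("worker %zu starts at region %zu\n", w, start_index);
      }
      // Every worker still visits all regions via (start_index + count) % regions;
      // the offsets only spread the first claim attempts across the heap.
      return 0;
    }
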
1933 | |
390 | 1934 class ResetClaimValuesClosure: public HeapRegionClosure { |
1935 public: | |
1936 bool doHeapRegion(HeapRegion* r) { | |
1937 r->set_claim_value(HeapRegion::InitialClaimValue); | |
1938 return false; | |
1939 } | |
1940 }; | |
1941 | |
1942 void | |
1943 G1CollectedHeap::reset_heap_region_claim_values() { | |
1944 ResetClaimValuesClosure blk; | |
1945 heap_region_iterate(&blk); | |
1946 } | |
1947 | |
355 | 1948 #ifdef ASSERT |
1949 // This checks whether all regions in the heap have the correct claim | |
1950 // value. It also piggy-backs a check that the | |
1951 // humongous_start_region() information on "continues humongous" | |
1952 // regions is correct. | |
1953 | |
1954 class CheckClaimValuesClosure : public HeapRegionClosure { | |
1955 private: | |
1956 jint _claim_value; | |
1957 size_t _failures; | |
1958 HeapRegion* _sh_region; | |
1959 public: | |
1960 CheckClaimValuesClosure(jint claim_value) : | |
1961 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } | |
1962 bool doHeapRegion(HeapRegion* r) { | |
1963 if (r->claim_value() != _claim_value) { | |
1964 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
1965 "claim value = %d, should be %d", | |
1966 r->bottom(), r->end(), r->claim_value(), | |
1967 _claim_value); | |
1968 ++_failures; | |
1969 } | |
1970 if (!r->isHumongous()) { | |
1971 _sh_region = NULL; | |
1972 } else if (r->startsHumongous()) { | |
1973 _sh_region = r; | |
1974 } else if (r->continuesHumongous()) { | |
1975 if (r->humongous_start_region() != _sh_region) { | |
1976 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
1977 "HS = "PTR_FORMAT", should be "PTR_FORMAT, | |
1978 r->bottom(), r->end(), | |
1979 r->humongous_start_region(), | |
1980 _sh_region); | |
1981 ++_failures; | |
342 | 1982 } |
1983 } | |
355 | 1984 return false; |
1985 } | |
1986 size_t failures() { | |
1987 return _failures; | |
1988 } | |
1989 }; | |
1990 | |
1991 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
1992 CheckClaimValuesClosure cl(claim_value); | |
1993 heap_region_iterate(&cl); | |
1994 return cl.failures() == 0; | |
1995 } | |
1996 #endif // ASSERT | |
342 | 1997 |
1998 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
1999 HeapRegion* r = g1_policy()->collection_set(); | |
2000 while (r != NULL) { | |
2001 HeapRegion* next = r->next_in_collection_set(); | |
2002 if (cl->doHeapRegion(r)) { | |
2003 cl->incomplete(); | |
2004 return; | |
2005 } | |
2006 r = next; | |
2007 } | |
2008 } | |
2009 | |
2010 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
2011 HeapRegionClosure *cl) { | |
2012 assert(r->in_collection_set(), | |
2013 "Start region must be a member of the collection set."); | |
2014 HeapRegion* cur = r; | |
2015 while (cur != NULL) { | |
2016 HeapRegion* next = cur->next_in_collection_set(); | |
2017 if (cl->doHeapRegion(cur) && false) { | |
2018 cl->incomplete(); | |
2019 return; | |
2020 } | |
2021 cur = next; | |
2022 } | |
2023 cur = g1_policy()->collection_set(); | |
2024 while (cur != r) { | |
2025 HeapRegion* next = cur->next_in_collection_set(); | |
2026 if (cl->doHeapRegion(cur) && false) { | |
2027 cl->incomplete(); | |
2028 return; | |
2029 } | |
2030 cur = next; | |
2031 } | |
2032 } | |
2033 | |
2034 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
2035 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
2036 } | |
2037 | |
2038 | |
2039 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2040 Space* res = heap_region_containing(addr); | |
2041 if (res == NULL) | |
2042 res = perm_gen()->space_containing(addr); | |
2043 return res; | |
2044 } | |
2045 | |
2046 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2047 Space* sp = space_containing(addr); | |
2048 if (sp != NULL) { | |
2049 return sp->block_start(addr); | |
2050 } | |
2051 return NULL; | |
2052 } | |
2053 | |
2054 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
2055 Space* sp = space_containing(addr); | |
2056 assert(sp != NULL, "block_size of address outside of heap"); | |
2057 return sp->block_size(addr); | |
2058 } | |
2059 | |
2060 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2061 Space* sp = space_containing(addr); | |
2062 return sp->block_is_obj(addr); | |
2063 } | |
2064 | |
2065 bool G1CollectedHeap::supports_tlab_allocation() const { | |
2066 return true; | |
2067 } | |
2068 | |
2069 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
2070 return HeapRegion::GrainBytes; | |
2071 } | |
2072 | |
2073 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
2074 // Return the remaining space in the cur alloc region, but not less than | |
2075 // the min TLAB size. | |
2076 // Also, no more than half the region size, since we can't allow tlabs to | |
2077 // grow big enough to accommodate humongous objects. | |
2078 | |
2079 // We need to store it locally, since it might change between when we | |
2080 // test for NULL and when we use it later. | |
2081 ContiguousSpace* cur_alloc_space = _cur_alloc_region; | |
2082 if (cur_alloc_space == NULL) { | |
2083 return HeapRegion::GrainBytes/2; | |
2084 } else { | |
2085 return MAX2(MIN2(cur_alloc_space->free(), | |
2086 (size_t)(HeapRegion::GrainBytes/2)), | |
2087 (size_t)MinTLABSize); | |
2088 } | |
2089 } | |
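
The returned bound is the current region's free space clamped from above by half a region (so a TLAB can never grow humongous) and from below by MinTLABSize. With assumed values, an almost-full region still yields the minimum TLAB size:

    #include <cstdio>
    #include <cstddef>
    #include <algorithm>

    int main() {
      const size_t grain    = 1024 * 1024;   // assumed HeapRegion::GrainBytes
      const size_t min_tlab = 2 * 1024;      // assumed MinTLABSize
      size_t free_in_region = 1500;          // nearly full current alloc region
      // Same clamp as MAX2(MIN2(free, GrainBytes/2), MinTLABSize) above.
      size_t limit = std::max(std::min(free_in_region, grain / 2), min_tlab);
      printf("tlab limit = %zu bytes\n", limit);   // prints 2048
      return 0;
    }
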
2090 | |
2091 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { | |
2092 bool dummy; | |
2093 return G1CollectedHeap::mem_allocate(size, false, true, &dummy); | |
2094 } | |
2095 | |
2096 bool G1CollectedHeap::allocs_are_zero_filled() { | |
2097 return false; | |
2098 } | |
2099 | |
2100 size_t G1CollectedHeap::large_typearray_limit() { | |
2101 // FIXME | |
2102 return HeapRegion::GrainBytes/HeapWordSize; | |
2103 } | |
2104 | |
2105 size_t G1CollectedHeap::max_capacity() const { | |
2106 return _g1_committed.byte_size(); | |
2107 } | |
2108 | |
2109 jlong G1CollectedHeap::millis_since_last_gc() { | |
2110 // assert(false, "NYI"); | |
2111 return 0; | |
2112 } | |
2113 | |
2114 | |
2115 void G1CollectedHeap::prepare_for_verify() { | |
2116 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2117 ensure_parsability(false); | |
2118 } | |
2119 g1_rem_set()->prepare_for_verify(); | |
2120 } | |
2121 | |
2122 class VerifyLivenessOopClosure: public OopClosure { | |
2123 G1CollectedHeap* g1h; | |
2124 public: | |
2125 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
2126 g1h = _g1h; | |
2127 } | |
2128 void do_oop(narrowOop *p) { | |
2129 guarantee(false, "NYI"); | |
2130 } | |
2131 void do_oop(oop *p) { | |
2132 oop obj = *p; | |
2133 assert(obj == NULL || !g1h->is_obj_dead(obj), | |
2134 "Dead object referenced by a not dead object"); | |
2135 } | |
2136 }; | |
2137 | |
2138 class VerifyObjsInRegionClosure: public ObjectClosure { | |
2139 G1CollectedHeap* _g1h; | |
2140 size_t _live_bytes; | |
2141 HeapRegion *_hr; | |
2142 public: | |
2143 VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) { | |
2144 _g1h = G1CollectedHeap::heap(); | |
2145 } | |
2146 void do_object(oop o) { | |
2147 VerifyLivenessOopClosure isLive(_g1h); | |
2148 assert(o != NULL, "Huh?"); | |
2149 if (!_g1h->is_obj_dead(o)) { | |
2150 o->oop_iterate(&isLive); | |
2151 if (!_hr->obj_allocated_since_prev_marking(o)) | |
2152 _live_bytes += (o->size() * HeapWordSize); | |
2153 } | |
2154 } | |
2155 size_t live_bytes() { return _live_bytes; } | |
2156 }; | |
2157 | |
2158 class PrintObjsInRegionClosure : public ObjectClosure { | |
2159 HeapRegion *_hr; | |
2160 G1CollectedHeap *_g1; | |
2161 public: | |
2162 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2163 _g1 = G1CollectedHeap::heap(); | |
2164 }; | |
2165 | |
2166 void do_object(oop o) { | |
2167 if (o != NULL) { | |
2168 HeapWord *start = (HeapWord *) o; | |
2169 size_t word_sz = o->size(); | |
2170 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2171 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2172 (void*) o, word_sz, | |
2173 _g1->isMarkedPrev(o), | |
2174 _g1->isMarkedNext(o), | |
2175 _hr->obj_allocated_since_prev_marking(o)); | |
2176 HeapWord *end = start + word_sz; | |
2177 HeapWord *cur; | |
2178 int *val; | |
2179 for (cur = start; cur < end; cur++) { | |
2180 val = (int *) cur; | |
2181 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2182 } | |
2183 } | |
2184 } | |
2185 }; | |
2186 | |
2187 class VerifyRegionClosure: public HeapRegionClosure { | |
2188 public: | |
2189 bool _allow_dirty; | |
390 | 2190 bool _par; |
2191 VerifyRegionClosure(bool allow_dirty, bool par = false) | |
2192 : _allow_dirty(allow_dirty), _par(par) {} | |
342 | 2193 bool doHeapRegion(HeapRegion* r) { |
390 | 2194 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2195 "Should be unclaimed at verify points."); | |
637 | 2196 if (!r->continuesHumongous()) { |
342 | 2197 VerifyObjsInRegionClosure not_dead_yet_cl(r); |
2198 r->verify(_allow_dirty); | |
2199 r->object_iterate(¬_dead_yet_cl); | |
2200 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(), | |
2201 "More live objects than counted in last complete marking."); | |
2202 } | |
2203 return false; | |
2204 } | |
2205 }; | |
2206 | |
2207 class VerifyRootsClosure: public OopsInGenClosure { | |
2208 private: | |
2209 G1CollectedHeap* _g1h; | |
2210 bool _failures; | |
2211 | |
2212 public: | |
2213 VerifyRootsClosure() : | |
2214 _g1h(G1CollectedHeap::heap()), _failures(false) { } | |
2215 | |
2216 bool failures() { return _failures; } | |
2217 | |
2218 void do_oop(narrowOop* p) { | |
2219 guarantee(false, "NYI"); | |
2220 } | |
2221 | |
2222 void do_oop(oop* p) { | |
2223 oop obj = *p; | |
2224 if (obj != NULL) { | |
2225 if (_g1h->is_obj_dead(obj)) { | |
2226 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " | |
2227 "points to dead obj "PTR_FORMAT, p, (void*) obj); | |
2228 obj->print_on(gclog_or_tty); | |
2229 _failures = true; | |
2230 } | |
2231 } | |
2232 } | |
2233 }; | |
2234 | |
390 | 2235 // This is the task used for parallel heap verification. |
2236 | |
2237 class G1ParVerifyTask: public AbstractGangTask { | |
2238 private: | |
2239 G1CollectedHeap* _g1h; | |
2240 bool _allow_dirty; | |
2241 | |
2242 public: | |
2243 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) : | |
2244 AbstractGangTask("Parallel verify task"), | |
2245 _g1h(g1h), _allow_dirty(allow_dirty) { } | |
2246 | |
2247 void work(int worker_i) { | |
637 | 2248 HandleMark hm; |
390 | 2249 VerifyRegionClosure blk(_allow_dirty, true); |
2250 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, | |
2251 HeapRegion::ParVerifyClaimValue); | |
2252 } | |
2253 }; | |
2254 | |
342 | 2255 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
2256 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2257 if (!silent) { gclog_or_tty->print("roots "); } | |
2258 VerifyRootsClosure rootsCl; | |
2259 process_strong_roots(false, | |
2260 SharedHeap::SO_AllClasses, | |
2261 &rootsCl, | |
2262 &rootsCl); | |
2263 rem_set()->invalidate(perm_gen()->used_region(), false); | |
2264 if (!silent) { gclog_or_tty->print("heapRegions "); } | |
390 | 2265 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2266 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2267 "sanity check"); | |
2268 | |
2269 G1ParVerifyTask task(this, allow_dirty); | |
2270 int n_workers = workers()->total_workers(); | |
2271 set_par_threads(n_workers); | |
2272 workers()->run_task(&task); | |
2273 set_par_threads(0); | |
2274 | |
2275 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2276 "sanity check"); | |
2277 | |
2278 reset_heap_region_claim_values(); | |
2279 | |
2280 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2281 "sanity check"); | |
2282 } else { | |
2283 VerifyRegionClosure blk(allow_dirty); | |
2284 _hrs->iterate(&blk); | |
2285 } | |
342 | 2286 if (!silent) gclog_or_tty->print("remset "); |
2287 rem_set()->verify(); | |
2288 guarantee(!rootsCl.failures(), "should not have had failures"); | |
2289 } else { | |
2290 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2291 } | |
2292 } | |
2293 | |
2294 class PrintRegionClosure: public HeapRegionClosure { | |
2295 outputStream* _st; | |
2296 public: | |
2297 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2298 bool doHeapRegion(HeapRegion* r) { | |
2299 r->print_on(_st); | |
2300 return false; | |
2301 } | |
2302 }; | |
2303 | |
2304 void G1CollectedHeap::print() const { print_on(gclog_or_tty); } | |
2305 | |
2306 void G1CollectedHeap::print_on(outputStream* st) const { | |
2307 PrintRegionClosure blk(st); | |
2308 _hrs->iterate(&blk); | |
2309 } | |
2310 | |
794 | 2311 class PrintOnThreadsClosure : public ThreadClosure { |
2312 outputStream* _st; | |
2313 public: | |
2314 PrintOnThreadsClosure(outputStream* st) : _st(st) { } | |
2315 virtual void do_thread(Thread *t) { | |
2316 t->print_on(_st); | |
2317 } | |
2318 }; | |
2319 | |
342 | 2320 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { |
2321 if (ParallelGCThreads > 0) { | |
2322 workers()->print_worker_threads(); | |
2323 } | |
2324 st->print("\"G1 concurrent mark GC Thread\" "); | |
2325 _cmThread->print(); | |
2326 st->cr(); | |
794 | 2327 st->print("\"G1 concurrent refinement GC Threads\" "); |
2328 PrintOnThreadsClosure p(st); | |
2329 _cg1r->threads_do(&p); | |
342 | 2330 st->cr(); |
2331 st->print("\"G1 zero-fill GC Thread\" "); | |
2332 _czft->print_on(st); | |
2333 st->cr(); | |
2334 } | |
2335 | |
2336 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
2337 if (ParallelGCThreads > 0) { | |
2338 workers()->threads_do(tc); | |
2339 } | |
2340 tc->do_thread(_cmThread); | |
794 | 2341 _cg1r->threads_do(tc); |
342 | 2342 tc->do_thread(_czft); |
2343 } | |
2344 | |
2345 void G1CollectedHeap::print_tracing_info() const { | |
2346 concurrent_g1_refine()->print_final_card_counts(); | |
2347 | |
2348 // We'll overload this to mean "trace GC pause statistics." | |
2349 if (TraceGen0Time || TraceGen1Time) { | |
2350 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2351 // to that. | |
2352 g1_policy()->print_tracing_info(); | |
2353 } | |
751 | 2354 if (G1SummarizeRSetStats) { |
342 | 2355 g1_rem_set()->print_summary_info(); |
2356 } | |
751 | 2357 if (G1SummarizeConcurrentMark) { |
342 | 2358 concurrent_mark()->print_summary_info(); |
2359 } | |
751 | 2360 if (G1SummarizeZFStats) { |
342 | 2361 ConcurrentZFThread::print_summary_info(); |
2362 } | |
2363 g1_policy()->print_yg_surv_rate_info(); | |
2364 | |
2365 GCOverheadReporter::printGCOverhead(); | |
2366 | |
2367 SpecializationStats::print(); | |
2368 } | |
2369 | |
2370 | |
2371 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2372 HeapRegion* hr = heap_region_containing(addr); | |
2373 if (hr == NULL) { | |
2374 return 0; | |
2375 } else { | |
2376 return 1; | |
2377 } | |
2378 } | |
2379 | |
2380 G1CollectedHeap* G1CollectedHeap::heap() { | |
2381 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
2382 "not a garbage-first heap"); | |
2383 return _g1h; | |
2384 } | |
2385 | |
2386 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
2387 if (PrintHeapAtGC){ | |
2388 gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections()); | |
2389 Universe::print(); | |
2390 } | |
2391 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); | |
2392 // Call allocation profiler | |
2393 AllocationProfiler::iterate_since_last_gc(); | |
2394 // Fill TLAB's and such | |
2395 ensure_parsability(true); | |
2396 } | |
2397 | |
2398 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
2399 // FIXME: what is this about? | |
2400 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
2401 // is set. | |
2402 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
2403 "derived pointer present")); | |
2404 | |
2405 if (PrintHeapAtGC){ | |
2406 gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections()); | |
2407 Universe::print(); | |
2408 gclog_or_tty->print("} "); | |
2409 } | |
2410 } | |
2411 | |
2412 void G1CollectedHeap::do_collection_pause() { | |
2413 // Read the GC count while holding the Heap_lock | |
2414 // we need to do this _before_ wait_for_cleanup_complete(), to | |
2415 // ensure that we do not give up the heap lock and potentially | |
2416 // pick up the wrong count | |
2417 int gc_count_before = SharedHeap::heap()->total_collections(); | |
2418 | |
2419 // Don't want to do a GC pause while cleanup is being completed! | |
2420 wait_for_cleanup_complete(); | |
2421 | |
2422 g1_policy()->record_stop_world_start(); | |
2423 { | |
2424 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
2425 VM_G1IncCollectionPause op(gc_count_before); | |
2426 VMThread::execute(&op); | |
2427 } | |
2428 } | |
2429 | |
2430 void | |
2431 G1CollectedHeap::doConcurrentMark() { | |
2432 if (G1ConcMark) { | |
2433 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2434 if (!_cmThread->in_progress()) { | |
2435 _cmThread->set_started(); | |
2436 CGC_lock->notify(); | |
2437 } | |
2438 } | |
2439 } | |
2440 | |
2441 class VerifyMarkedObjsClosure: public ObjectClosure { | |
2442 G1CollectedHeap* _g1h; | |
2443 public: | |
2444 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
2445 void do_object(oop obj) { | |
2446 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
2447 "markandsweep mark should agree with concurrent deadness"); | |
2448 } | |
2449 }; | |
2450 | |
2451 void | |
2452 G1CollectedHeap::checkConcurrentMark() { | |
2453 VerifyMarkedObjsClosure verifycl(this); | |
2454 // MutexLockerEx x(getMarkBitMapLock(), | |
2455 // Mutex::_no_safepoint_check_flag); | |
678 | 2456 object_iterate(&verifycl, false); |
342 | 2457 } |
2458 | |
2459 void G1CollectedHeap::do_sync_mark() { | |
2460 _cm->checkpointRootsInitial(); | |
2461 _cm->markFromRoots(); | |
2462 _cm->checkpointRootsFinal(false); | |
2463 } | |
2464 | |
2465 // <NEW PREDICTION> | |
2466 | |
2467 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
2468 bool young) { | |
2469 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
2470 } | |
2471 | |
2472 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
2473 predicted_time_ms) { | |
2474 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
2475 } | |
2476 | |
2477 size_t G1CollectedHeap::pending_card_num() { | |
2478 size_t extra_cards = 0; | |
2479 JavaThread *curr = Threads::first(); | |
2480 while (curr != NULL) { | |
2481 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
2482 extra_cards += dcq.size(); | |
2483 curr = curr->next(); | |
2484 } | |
2485 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2486 size_t buffer_size = dcqs.buffer_size(); | |
2487 size_t buffer_num = dcqs.completed_buffers_num(); | |
2488 return buffer_size * buffer_num + extra_cards; | |
2489 } | |
2490 | |
2491 size_t G1CollectedHeap::max_pending_card_num() { | |
2492 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2493 size_t buffer_size = dcqs.buffer_size(); | |
2494 size_t buffer_num = dcqs.completed_buffers_num(); | |
2495 int thread_num = Threads::number_of_threads(); | |
2496 return (buffer_num + thread_num) * buffer_size; | |
2497 } | |
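// [Editor's illustration, not part of the source] A worked example for the
// two card-counting functions above, with assumed numbers: if the dirty
// card queue set uses 256-entry buffers, 3 buffers have been completed,
// and 2 Java threads hold 10 and 20 entries in their private queues, then
//
//   pending_card_num()     = 256 * 3 + (10 + 20) = 798
//   max_pending_card_num() = (3 + 2) * 256       = 1280
//
// i.e. the maximum pessimistically rounds every thread-local queue up to
// one full buffer.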
2498 | |
2499 size_t G1CollectedHeap::cards_scanned() { | |
2500 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); | |
2501 return g1_rset->cardsScanned(); | |
2502 } | |
2503 | |
2504 void | |
2505 G1CollectedHeap::setup_surviving_young_words() { | |
2506 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
2507 size_t array_length = g1_policy()->young_cset_length(); | |
2508 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
2509 if (_surviving_young_words == NULL) { | |
2510 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
2511 "Not enough space for young surv words summary."); | |
2512 } | |
2513 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
2514 for (size_t i = 0; i < array_length; ++i) { | |
2515 guarantee( _surviving_young_words[i] == 0, "invariant" ); | |
2516 } | |
2517 } | |
2518 | |
2519 void | |
2520 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
2521 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2522 size_t array_length = g1_policy()->young_cset_length(); | |
2523 for (size_t i = 0; i < array_length; ++i) | |
2524 _surviving_young_words[i] += surv_young_words[i]; | |
2525 } | |
2526 | |
2527 void | |
2528 G1CollectedHeap::cleanup_surviving_young_words() { | |
2529 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
2530 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
2531 _surviving_young_words = NULL; | |
2532 } | |
2533 | |
2534 // </NEW PREDICTION> | |
2535 | |
2536 void | |
677 | 2537 G1CollectedHeap::do_collection_pause_at_safepoint() { |
342 | 2538 char verbose_str[128]; |
2539 sprintf(verbose_str, "GC pause "); | |
677 | 2540 if (g1_policy()->in_young_gc_mode()) { |
342 | 2541 if (g1_policy()->full_young_gcs()) |
2542 strcat(verbose_str, "(young)"); | |
2543 else | |
2544 strcat(verbose_str, "(partial)"); | |
2545 } | |
2546 if (g1_policy()->should_initiate_conc_mark()) | |
2547 strcat(verbose_str, " (initial-mark)"); | |
2548 | |
677 | 2549 GCCauseSetter x(this, GCCause::_g1_inc_collection_pause); |
342 | 2550 |
2551 // if PrintGCDetails is on, we'll print long statistics information | |
2552 // in the collector policy code, so let's not print this as the output | |
2553 // is messy if we do. | |
2554 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
2555 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
2556 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); | |
2557 | |
2558 ResourceMark rm; | |
2559 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
2560 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
2561 guarantee(!is_gc_active(), "collection is not reentrant"); | |
2562 assert(regions_accounted_for(), "Region leakage!"); | |
353 | 2563 |
2564 increment_gc_time_stamp(); | |
342 | 2565 |
2566 if (g1_policy()->in_young_gc_mode()) { | |
2567 assert(check_young_list_well_formed(), | |
2568 "young list should be well formed"); | |
2569 } | |
2570 | |
2571 if (GC_locker::is_active()) { | |
2572 return; // GC is disabled (e.g. JNI GetXXXCritical operation) | |
2573 } | |
2574 | |
2575 bool abandoned = false; | |
2576 { // Call to jvmpi::post_class_unload_events must occur outside of active GC | |
2577 IsGCActiveMark x; | |
2578 | |
2579 gc_prologue(false); | |
2580 increment_total_collections(); | |
2581 | |
2582 #if G1_REM_SET_LOGGING | |
2583 gclog_or_tty->print_cr("\nJust chose CS, heap:"); | |
2584 print(); | |
2585 #endif | |
2586 | |
2587 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
2588 HandleMark hm; // Discard invalid handles created during verification | |
2589 prepare_for_verify(); | |
2590 gclog_or_tty->print(" VerifyBeforeGC:"); | |
2591 Universe::verify(false); | |
2592 } | |
2593 | |
2594 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
2595 | |
453 | 2596 // We want to turn off ref discovery, if necessary, and turn it back |
342 | 2597 // on again later if we do. |
2598 bool was_enabled = ref_processor()->discovery_enabled(); | |
2599 if (was_enabled) ref_processor()->disable_discovery(); | |
2600 | |
2601 // Forget the current alloc region (we might even choose it to be part | |
2602 // of the collection set!). | |
2603 abandon_cur_alloc_region(); | |
2604 | |
2605 // The elapsed time measured from the start time below deliberately | |
2606 // excludes the possible verification above. | |
2607 double start_time_sec = os::elapsedTime(); | |
2608 GCOverheadReporter::recordSTWStart(start_time_sec); | |
2609 size_t start_used_bytes = used(); | |
2610 if (!G1ConcMark) { | |
2611 do_sync_mark(); | |
2612 } | |
2613 | |
2614 g1_policy()->record_collection_pause_start(start_time_sec, | |
2615 start_used_bytes); | |
2616 | |
526 | 2617 guarantee(_in_cset_fast_test == NULL, "invariant"); |
2618 guarantee(_in_cset_fast_test_base == NULL, "invariant"); | |
618 | 2619 _in_cset_fast_test_length = max_regions(); |
526 | 2620 _in_cset_fast_test_base = |
2621 NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); | |
2622 memset(_in_cset_fast_test_base, false, | |
2623 _in_cset_fast_test_length * sizeof(bool)); | |
2624 // We're biasing _in_cset_fast_test to avoid subtracting the | |
2625 // beginning of the heap every time we want to index; basically | |
2626 // it's the same as what we do with the card table. | |
2627 _in_cset_fast_test = _in_cset_fast_test_base - | |
2628 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); | |
2629 | |
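// [Editor's illustration, not part of the source] With the biased base
// computed above, a collection-set membership test is a single shift and
// load, with no subtraction of the heap start. A hedged sketch, using a
// hypothetical helper name:
//
//   inline bool in_cset_fast(HeapWord* addr) {
//     // biased: the raw (addr >> shift) is a valid index directly
//     return _in_cset_fast_test[(size_t) addr >> HeapRegion::LogOfHRGrainBytes];
//   }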
342 | 2630 #if SCAN_ONLY_VERBOSE |
2631 _young_list->print(); | |
2632 #endif // SCAN_ONLY_VERBOSE | |
2633 | |
2634 if (g1_policy()->should_initiate_conc_mark()) { | |
2635 concurrent_mark()->checkpointRootsInitialPre(); | |
2636 } | |
2637 save_marks(); | |
2638 | |
605 | 2639 // We must do this before any possible evacuation that should propagate |
677 | 2640 // marks. |
342 | 2641 if (mark_in_progress()) { |
2642 double start_time_sec = os::elapsedTime(); | |
2643 | |
2644 _cm->drainAllSATBBuffers(); | |
2645 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; | |
2646 g1_policy()->record_satb_drain_time(finish_mark_ms); | |
2647 | |
2648 } | |
2649 // Record the number of elements currently on the mark stack, so we | |
2650 // only iterate over these. (Since evacuation may add to the mark | |
2651 // stack, doing more exposes race conditions.) If no mark is in | |
2652 // progress, this will be zero. | |
2653 _cm->set_oops_do_bound(); | |
2654 | |
2655 assert(regions_accounted_for(), "Region leakage."); | |
2656 | |
2657 if (mark_in_progress()) | |
2658 concurrent_mark()->newCSet(); | |
2659 | |
2660 // Now choose the CS. | |
677 | 2661 g1_policy()->choose_collection_set(); |
2662 | |
342 | 2663 // We may abandon a pause if we find no region that will fit in the MMU |
2664 // pause. | |
677 | 2665 abandoned = (g1_policy()->collection_set() == NULL); |
342 | 2666 |
2667 // Nothing to do if we were unable to choose a collection set. | |
2668 if (!abandoned) { | |
2669 #if G1_REM_SET_LOGGING | |
2670 gclog_or_tty->print_cr("\nAfter pause, heap:"); | |
2671 print(); | |
2672 #endif | |
2673 | |
2674 setup_surviving_young_words(); | |
2675 | |
2676 // Set up the gc allocation regions. | |
2677 get_gc_alloc_regions(); | |
2678 | |
2679 // Actually do the work... | |
2680 evacuate_collection_set(); | |
2681 free_collection_set(g1_policy()->collection_set()); | |
2682 g1_policy()->clear_collection_set(); | |
2683 | |
526 | 2684 FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base); |
2685 // this is more for peace of mind; we're nulling them here and | |
2686 // we're expecting them to be null at the beginning of the next GC | |
2687 _in_cset_fast_test = NULL; | |
2688 _in_cset_fast_test_base = NULL; | |
2689 | |
636 | 2690 release_gc_alloc_regions(false /* totally */); |
342 | 2691 |
2692 cleanup_surviving_young_words(); | |
2693 | |
2694 if (g1_policy()->in_young_gc_mode()) { | |
2695 _young_list->reset_sampled_info(); | |
2696 assert(check_young_list_empty(true), | |
2697 "young list should be empty"); | |
2698 | |
2699 #if SCAN_ONLY_VERBOSE | |
2700 _young_list->print(); | |
2701 #endif // SCAN_ONLY_VERBOSE | |
2702 | |
545 | 2703 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
2704 _young_list->first_survivor_region(), | |
2705 _young_list->last_survivor_region()); | |
342 | 2706 _young_list->reset_auxilary_lists(); |
2707 } | |
2708 } else { | |
2709 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
2710 } | |
2711 | |
2712 if (evacuation_failed()) { | |
2713 _summary_bytes_used = recalculate_used(); | |
2714 } else { | |
2715 // The "used" of the the collection set have already been subtracted | |
2716 // when they were freed. Add in the bytes evacuated. | |
2717 _summary_bytes_used += g1_policy()->bytes_in_to_space(); | |
2718 } | |
2719 | |
2720 if (g1_policy()->in_young_gc_mode() && | |
2721 g1_policy()->should_initiate_conc_mark()) { | |
2722 concurrent_mark()->checkpointRootsInitialPost(); | |
2723 set_marking_started(); | |
2724 doConcurrentMark(); | |
2725 } | |
2726 | |
2727 #if SCAN_ONLY_VERBOSE | |
2728 _young_list->print(); | |
2729 #endif // SCAN_ONLY_VERBOSE | |
2730 | |
2731 double end_time_sec = os::elapsedTime(); | |
595 | 2732 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
2733 g1_policy()->record_pause_time_ms(pause_time_ms); | |
342 | 2734 GCOverheadReporter::recordSTWEnd(end_time_sec); |
677 | 2735 g1_policy()->record_collection_pause_end(abandoned); |
342 | 2736 |
2737 assert(regions_accounted_for(), "Region leakage."); | |
2738 | |
2739 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { | |
2740 HandleMark hm; // Discard invalid handles created during verification | |
2741 gclog_or_tty->print(" VerifyAfterGC:"); | |
637 | 2742 prepare_for_verify(); |
342 | 2743 Universe::verify(false); |
2744 } | |
2745 | |
2746 if (was_enabled) ref_processor()->enable_discovery(); | |
2747 | |
2748 { | |
2749 size_t expand_bytes = g1_policy()->expansion_amount(); | |
2750 if (expand_bytes > 0) { | |
2751 size_t bytes_before = capacity(); | |
2752 expand(expand_bytes); | |
2753 } | |
2754 } | |
2755 | |
546 | 2756 if (mark_in_progress()) { |
342 | 2757 concurrent_mark()->update_g1_committed(); |
546 | 2758 } |
2759 | |
2760 #ifdef TRACESPINNING | |
2761 ParallelTaskTerminator::print_termination_counts(); | |
2762 #endif | |
342 | 2763 |
2764 gc_epilogue(false); | |
2765 } | |
2766 | |
2767 assert(verify_region_lists(), "Bad region lists."); | |
2768 | |
2769 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { | |
2770 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); | |
2771 print_tracing_info(); | |
2772 vm_exit(-1); | |
2773 } | |
2774 } | |
2775 | |
2776 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { | |
2777 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 2778 // make sure we don't call set_gc_alloc_region() multiple times on |
2779 // the same region | |
2780 assert(r == NULL || !r->is_gc_alloc_region(), | |
2781 "shouldn't already be a GC alloc region"); | |
342 | 2782 HeapWord* original_top = NULL; |
2783 if (r != NULL) | |
2784 original_top = r->top(); | |
2785 | |
2786 // We will want to record the used space in r as being there before gc. | |
2787 // Once we install it as a GC alloc region, it's eligible for allocation. | |
2788 // So record it now and use it later. | |
2789 size_t r_used = 0; | |
2790 if (r != NULL) { | |
2791 r_used = r->used(); | |
2792 | |
2793 if (ParallelGCThreads > 0) { | |
2794 // need to take the lock to guard against two threads calling | |
2795 // get_gc_alloc_region concurrently (very unlikely but...) | |
2796 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2797 r->save_marks(); | |
2798 } | |
2799 } | |
2800 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
2801 _gc_alloc_regions[purpose] = r; | |
2802 if (old_alloc_region != NULL) { | |
2803 // Replace aliases too. | |
2804 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2805 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
2806 _gc_alloc_regions[ap] = r; | |
2807 } | |
2808 } | |
2809 } | |
2810 if (r != NULL) { | |
2811 push_gc_alloc_region(r); | |
2812 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
2813 // We are using a region as a GC alloc region after it has been used | |
2814 // as a mutator allocation region during the current marking cycle. | |
2815 // The mutator-allocated objects are currently implicitly marked, but | |
2816 // when we move hr->next_top_at_mark_start() forward at the end | |
2817 // of the GC pause, they won't be. We therefore mark all objects in | |
2818 // the "gap". We do this object-by-object, since marking densely | |
2819 // does not currently work right with marking bitmap iteration. This | |
2820 // means we rely on TLAB filling at the start of pauses, and no | |
2821 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
2822 // to fix the marking bitmap iteration. | |
2823 HeapWord* curhw = r->next_top_at_mark_start(); | |
2824 HeapWord* t = original_top; | |
2825 | |
2826 while (curhw < t) { | |
2827 oop cur = (oop)curhw; | |
2828 // We'll assume parallel for generality. This is rare code. | |
2829 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
2830 curhw = curhw + cur->size(); | |
2831 } | |
2832 assert(curhw == t, "Should have parsed correctly."); | |
2833 } | |
2834 if (G1PolicyVerbose > 1) { | |
2835 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
2836 "for survivors:", r->bottom(), original_top, r->end()); | |
2837 r->print(); | |
2838 } | |
2839 g1_policy()->record_before_bytes(r_used); | |
2840 } | |
2841 } | |
2842 | |
2843 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
2844 assert(Thread::current()->is_VM_thread() || | |
2845 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); | |
2846 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), | |
2847 "Precondition."); | |
2848 hr->set_is_gc_alloc_region(true); | |
2849 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
2850 _gc_alloc_region_list = hr; | |
2851 } | |
2852 | |
2853 #ifdef G1_DEBUG | |
2854 class FindGCAllocRegion: public HeapRegionClosure { | |
2855 public: | |
2856 bool doHeapRegion(HeapRegion* r) { | |
2857 if (r->is_gc_alloc_region()) { | |
2858 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", | |
2859 r->hrs_index(), r->bottom()); | |
2860 } | |
2861 return false; | |
2862 } | |
2863 }; | |
2864 #endif // G1_DEBUG | |
2865 | |
2866 void G1CollectedHeap::forget_alloc_region_list() { | |
2867 assert(Thread::current()->is_VM_thread(), "Precondition"); | |
2868 while (_gc_alloc_region_list != NULL) { | |
2869 HeapRegion* r = _gc_alloc_region_list; | |
2870 assert(r->is_gc_alloc_region(), "Invariant."); | |
637 | 2871 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
2872 // newly allocated data in order to be able to apply deferred updates | |
2873 // before the GC is done, for verification purposes (i.e. to allow | |
2874 // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the | |
2875 // collection. | |
2876 r->ContiguousSpace::set_saved_mark(); | |
342 | 2877 _gc_alloc_region_list = r->next_gc_alloc_region(); |
2878 r->set_next_gc_alloc_region(NULL); | |
2879 r->set_is_gc_alloc_region(false); | |
545 | 2880 if (r->is_survivor()) { |
2881 if (r->is_empty()) { | |
2882 r->set_not_young(); | |
2883 } else { | |
2884 _young_list->add_survivor_region(r); | |
2885 } | |
2886 } | |
342 | 2887 if (r->is_empty()) { |
2888 ++_free_regions; | |
2889 } | |
2890 } | |
2891 #ifdef G1_DEBUG | |
2892 FindGCAllocRegion fa; | |
2893 heap_region_iterate(&fa); | |
2894 #endif // G1_DEBUG | |
2895 } | |
2896 | |
2897 | |
2898 bool G1CollectedHeap::check_gc_alloc_regions() { | |
2899 // TODO: allocation regions check | |
2900 return true; | |
2901 } | |
2902 | |
2903 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 2904 // First, let's check that the GC alloc region list is empty (it should be) |
2905 assert(_gc_alloc_region_list == NULL, "invariant"); | |
2906 | |
342 | 2907 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 2908 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
2909 | |
342 | 2910 // Create new GC alloc regions. |
636 | 2911 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
2912 _retained_gc_alloc_regions[ap] = NULL; | |
2913 | |
2914 if (alloc_region != NULL) { | |
2915 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
2916 | |
2917 // let's make sure that the GC alloc region is not tagged as such | |
2918 // outside a GC operation | |
2919 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
2920 | |
2921 if (alloc_region->in_collection_set() || | |
2922 alloc_region->top() == alloc_region->end() || | |
2923 alloc_region->top() == alloc_region->bottom()) { | |
2924 // we will discard the current GC alloc region if it's in the | |
2925 // collection set (it can happen!), if it's already full (no | |
2926 // point in using it), or if it's empty (this means that it | |
2927 // was emptied during a cleanup and it should be on the free | |
2928 // list now). | |
2929 | |
2930 alloc_region = NULL; | |
2931 } | |
2932 } | |
2933 | |
2934 if (alloc_region == NULL) { | |
2935 // we will get a new GC alloc region | |
342 | 2936 alloc_region = newAllocRegionWithExpansion(ap, 0); |
2937 } | |
636 | 2938 |
342 | 2939 if (alloc_region != NULL) { |
636 | 2940 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 2941 set_gc_alloc_region(ap, alloc_region); |
2942 } | |
636 | 2943 |
2944 assert(_gc_alloc_regions[ap] == NULL || | |
2945 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
2946 "the GC alloc region should be tagged as such"); | |
2947 assert(_gc_alloc_regions[ap] == NULL || | |
2948 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
2949 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 2950 } |
2951 // Set alternative regions for allocation purposes that have reached | |
636 | 2952 // their limit. |
342 | 2953 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
2954 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
2955 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
2956 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
2957 } | |
2958 } | |
2959 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
2960 } | |
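// [Editor's note, not part of the source] The aliasing loop above lets an
// exhausted allocation purpose fall back to its alternative. Assuming the
// two purposes of this era (GCAllocForSurvived and GCAllocForTenured), a
// survivor allocation whose own region could not be obtained would simply
// see the tenured region:
//
//   HeapRegion* r = _gc_alloc_regions[GCAllocForSurvived];
//   // after aliasing, r may in fact be the tenured GC alloc region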
2961 | |
636 | 2962 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 2963 // We keep a separate list of all regions that have been alloc regions in |
636 | 2964 // the current collection pause. Forget that now. This method will |
2965 // untag the GC alloc regions and tear down the GC alloc region | |
2966 // list. It's desirable that no regions are tagged as GC alloc | |
2967 // outside GCs. | |
342 | 2968 forget_alloc_region_list(); |
2969 | |
2970 // The current alloc regions contain objs that have survived | |
2971 // collection. Make them no longer GC alloc regions. | |
2972 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2973 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 2974 _retained_gc_alloc_regions[ap] = NULL; |
2975 | |
2976 if (r != NULL) { | |
2977 // we retain nothing on _gc_alloc_regions between GCs | |
2978 set_gc_alloc_region(ap, NULL); | |
2979 _gc_alloc_region_counts[ap] = 0; | |
2980 | |
2981 if (r->is_empty()) { | |
2982 // we didn't actually allocate anything in it; let's just put | |
2983 // it on the free list | |
342 | 2984 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
2985 r->set_zero_fill_complete(); | |
2986 put_free_region_on_list_locked(r); | |
636 | 2987 } else if (_retain_gc_alloc_region[ap] && !totally) { |
2988 // retain it so that we can use it at the beginning of the next GC | |
2989 _retained_gc_alloc_regions[ap] = r; | |
342 | 2990 } |
2991 } | |
636 | 2992 } |
2993 } | |
2994 | |
2995 #ifndef PRODUCT | |
2996 // Useful for debugging | |
2997 | |
2998 void G1CollectedHeap::print_gc_alloc_regions() { | |
2999 gclog_or_tty->print_cr("GC alloc regions"); | |
3000 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3001 HeapRegion* r = _gc_alloc_regions[ap]; | |
3002 if (r == NULL) { | |
3003 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3004 } else { | |
3005 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3006 ap, r->bottom(), r->used()); | |
3007 } | |
3008 } | |
3009 } | |
3010 #endif // PRODUCT | |
342 | 3011 |
3012 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3013 _drain_in_progress = false; | |
3014 set_evac_failure_closure(cl); | |
3015 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3016 } | |
3017 | |
3018 void G1CollectedHeap::finalize_for_evac_failure() { | |
3019 assert(_evac_failure_scan_stack != NULL && | |
3020 _evac_failure_scan_stack->length() == 0, | |
3021 "Postcondition"); | |
3022 assert(!_drain_in_progress, "Postcondition"); | |
3023 // Don't have to delete, since the scan stack is a resource object. | |
3024 _evac_failure_scan_stack = NULL; | |
3025 } | |
3026 | |
3027 | |
3028 | |
3029 // *** Sequential G1 Evacuation | |
3030 | |
3031 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { | |
3032 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3033 // let the caller handle alloc failure | |
3034 if (alloc_region == NULL) return NULL; | |
3035 assert(isHumongous(word_size) || !alloc_region->isHumongous(), | |
3036 "Either the object is humongous or the region isn't"); | |
3037 HeapWord* block = alloc_region->allocate(word_size); | |
3038 if (block == NULL) { | |
3039 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); | |
3040 } | |
3041 return block; | |
3042 } | |
3043 | |
3044 class G1IsAliveClosure: public BoolObjectClosure { | |
3045 G1CollectedHeap* _g1; | |
3046 public: | |
3047 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3048 void do_object(oop p) { assert(false, "Do not call."); } | |
3049 bool do_object_b(oop p) { | |
3050 // It is reachable if it is outside the collection set, or is inside | |
3051 // and forwarded. | |
3052 | |
3053 #ifdef G1_DEBUG | |
3054 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3055 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3056 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3057 #endif // G1_DEBUG | |
3058 | |
3059 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3060 } | |
3061 }; | |
3062 | |
3063 class G1KeepAliveClosure: public OopClosure { | |
3064 G1CollectedHeap* _g1; | |
3065 public: | |
3066 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3067 void do_oop(narrowOop* p) { | |
3068 guarantee(false, "NYI"); | |
3069 } | |
3070 void do_oop(oop* p) { | |
3071 oop obj = *p; | |
3072 #ifdef G1_DEBUG | |
3073 if (PrintGC && Verbose) { | |
3074 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3075 p, (void*) obj, (void*) *p); | |
3076 } | |
3077 #endif // G1_DEBUG | |
3078 | |
3079 if (_g1->obj_in_cs(obj)) { | |
3080 assert( obj->is_forwarded(), "invariant" ); | |
3081 *p = obj->forwardee(); | |
3082 | |
3083 #ifdef G1_DEBUG | |
3084 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3085 (void*) obj, (void*) *p); | |
3086 #endif // G1_DEBUG | |
3087 } | |
3088 } | |
3089 }; | |
3090 | |
616 | 3091 class UpdateRSetImmediate : public OopsInHeapRegionClosure { |
342 | 3092 private: |
3093 G1CollectedHeap* _g1; | |
3094 G1RemSet* _g1_rem_set; | |
3095 public: | |
616 | 3096 UpdateRSetImmediate(G1CollectedHeap* g1) : |
3097 _g1(g1), _g1_rem_set(g1->g1_rem_set()) {} | |
342 | 3098 |
3099 void do_oop(narrowOop* p) { | |
3100 guarantee(false, "NYI"); | |
3101 } | |
3102 void do_oop(oop* p) { | |
3103 assert(_from->is_in_reserved(p), "paranoia"); | |
616 | 3104 if (*p != NULL && !_from->is_survivor()) { |
3105 _g1_rem_set->par_write_ref(_from, p, 0); | |
342 | 3106 } |
3107 } | |
3108 }; | |
3109 | |
616 | 3110 class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
3111 private: | |
3112 G1CollectedHeap* _g1; | |
3113 DirtyCardQueue *_dcq; | |
3114 CardTableModRefBS* _ct_bs; | |
3115 | |
3116 public: | |
3117 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : | |
3118 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} | |
3119 | |
3120 void do_oop(narrowOop* p) { | |
3121 guarantee(false, "NYI"); | |
3122 } | |
3123 void do_oop(oop* p) { | |
3124 assert(_from->is_in_reserved(p), "paranoia"); | |
3125 if (!_from->is_in_reserved(*p) && !_from->is_survivor()) { | |
3126 size_t card_index = _ct_bs->index_for(p); | |
3127 if (_ct_bs->mark_card_deferred(card_index)) { | |
3128 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); | |
3129 } | |
3130 } | |
3131 } | |
3132 }; | |
3133 | |
3134 | |
3135 | |
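// [Editor's note, not part of the source] The two closures above implement
// the same contract in two ways: UpdateRSetImmediate pushes each reference
// into the remembered set on the spot via par_write_ref(), while
// UpdateRSetDeferred only marks the covering card "deferred" and enqueues
// it, batching the actual RSet work for later processing of the dirty card
// queue. Rough intended flow under G1DeferredRSUpdate:
//
//   per reference:  mark_card_deferred(card) -> dcq.enqueue(card)   (cheap)
//   later, batched: drain queue -> par_write_ref(...) per dirty card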
342 | 3136 class RemoveSelfPointerClosure: public ObjectClosure { |
3137 private: | |
3138 G1CollectedHeap* _g1; | |
3139 ConcurrentMark* _cm; | |
3140 HeapRegion* _hr; | |
3141 size_t _prev_marked_bytes; | |
3142 size_t _next_marked_bytes; | |
616 | 3143 OopsInHeapRegionClosure *_cl; |
342 | 3144 public: |
616 | 3145 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : |
3146 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), | |
3147 _next_marked_bytes(0), _cl(cl) {} | |
342 | 3148 |
3149 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3150 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3151 | |
352 | 3152 // The original idea here was to coalesce evacuated and dead objects. |
3153 // However that caused complications with the block offset table (BOT). | |
3154 // In particular, if there were two TLABs, one of them partially refined. | |
3155 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
3156 // The BOT entries of the unrefined part of TLAB_2 point to the start | |
3157 // of TLAB_2. If the last object of TLAB_1 and the first object | |
3158 // of TLAB_2 are coalesced, then the cards of the unrefined part | |
3159 // would point into the middle of the filler object. | |
3160 // | |
3161 // The current approach is to not coalesce and leave the BOT contents intact. | |
3162 void do_object(oop obj) { | |
3163 if (obj->is_forwarded() && obj->forwardee() == obj) { | |
3164 // The object failed to move. | |
3165 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); | |
3166 _cm->markPrev(obj); | |
3167 assert(_cm->isPrevMarked(obj), "Should be marked!"); | |
3168 _prev_marked_bytes += (obj->size() * HeapWordSize); | |
3169 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { | |
3170 _cm->markAndGrayObjectIfNecessary(obj); | |
3171 } | |
3172 obj->set_mark(markOopDesc::prototype()); | |
3173 // While we were processing RSet buffers during the | |
3174 // collection, we actually didn't scan any cards on the | |
3175 // collection set, since we didn't want to update remembered | |
3176 // sets with entries that point into the collection set, given | |
3177 // that live objects from the collection set are about to move | |
3178 // and such entries will be stale very soon. This change also | |
3179 // dealt with a reliability issue which involved scanning a | |
3180 // card in the collection set and coming across an array that | |
3181 // was being chunked and looking malformed. The problem is | |
3182 // that, if evacuation fails, we might have remembered set | |
3183 // entries missing given that we skipped cards on the | |
3184 // collection set. So, we'll recreate such entries now. | |
616 | 3185 obj->oop_iterate(_cl); |
352 | 3186 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
3187 } else { | |
3188 // The object has been either evacuated or is dead. Fill it with a | |
3189 // dummy object. | |
3190 MemRegion mr((HeapWord*)obj, obj->size()); | |
481 | 3191 CollectedHeap::fill_with_object(mr); |
342 | 3192 _cm->clearRangeBothMaps(mr); |
3193 } | |
3194 } | |
3195 }; | |
3196 | |
3197 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
616 | 3198 UpdateRSetImmediate immediate_update(_g1h); |
3199 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); | |
3200 UpdateRSetDeferred deferred_update(_g1h, &dcq); | |
3201 OopsInHeapRegionClosure *cl; | |
3202 if (G1DeferredRSUpdate) { | |
3203 cl = &deferred_update; | |
3204 } else { | |
3205 cl = &immediate_update; | |
3206 } | |
342 | 3207 HeapRegion* cur = g1_policy()->collection_set(); |
3208 while (cur != NULL) { | |
3209 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3210 | |
616 | 3211 RemoveSelfPointerClosure rspc(_g1h, cl); |
342 | 3212 if (cur->evacuation_failed()) { |
3213 assert(cur->in_collection_set(), "bad CS"); | |
616 | 3214 cl->set_region(cur); |
342 | 3215 cur->object_iterate(&rspc); |
3216 | |
3217 // A number of manipulations to make the TAMS be the current top, | |
3218 // and the marked bytes be the ones observed in the iteration. | |
3219 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
3220 // The comments below are the postconditions achieved by the | |
3221 // calls. Note especially the last such condition, which says that | |
3222 // the count of marked bytes has been properly restored. | |
3223 cur->note_start_of_marking(false); | |
3224 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3225 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
3226 // _next_marked_bytes == prev_marked_bytes. | |
3227 cur->note_end_of_marking(); | |
3228 // _prev_top_at_mark_start == top(), | |
3229 // _prev_marked_bytes == prev_marked_bytes | |
3230 } | |
3231 // If there is no mark in progress, we modified the _next variables | |
3232 // above needlessly, but harmlessly. | |
3233 if (_g1h->mark_in_progress()) { | |
3234 cur->note_start_of_marking(false); | |
3235 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3236 // _next_marked_bytes == next_marked_bytes. | |
3237 } | |
3238 | |
3239 // Now make sure the region has the right index in the sorted array. | |
3240 g1_policy()->note_change_in_marked_bytes(cur); | |
3241 } | |
3242 cur = cur->next_in_collection_set(); | |
3243 } | |
3244 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3245 | |
3246 // Now restore saved marks, if any. | |
3247 if (_objs_with_preserved_marks != NULL) { | |
3248 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3249 assert(_objs_with_preserved_marks->length() == | |
3250 _preserved_marks_of_objs->length(), "Both or none."); | |
3251 guarantee(_objs_with_preserved_marks->length() == | |
3252 _preserved_marks_of_objs->length(), "Both or none."); | |
3253 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3254 oop obj = _objs_with_preserved_marks->at(i); | |
3255 markOop m = _preserved_marks_of_objs->at(i); | |
3256 obj->set_mark(m); | |
3257 } | |
3258 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3259 delete _objs_with_preserved_marks; | |
3260 delete _preserved_marks_of_objs; | |
3261 _objs_with_preserved_marks = NULL; | |
3262 _preserved_marks_of_objs = NULL; | |
3263 } | |
3264 } | |
3265 | |
3266 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
3267 _evac_failure_scan_stack->push(obj); | |
3268 } | |
3269 | |
3270 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3271 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3272 | |
3273 while (_evac_failure_scan_stack->length() > 0) { | |
3274 oop obj = _evac_failure_scan_stack->pop(); | |
3275 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3276 obj->oop_iterate_backwards(_evac_failure_closure); | |
3277 } | |
3278 } | |
3279 | |
3280 void G1CollectedHeap::handle_evacuation_failure(oop old) { | |
3281 markOop m = old->mark(); | |
3282 // forward to self | |
3283 assert(!old->is_forwarded(), "precondition"); | |
3284 | |
3285 old->forward_to(old); | |
3286 handle_evacuation_failure_common(old, m); | |
3287 } | |
3288 | |
3289 oop | |
3290 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
3291 oop old) { | |
3292 markOop m = old->mark(); | |
3293 oop forward_ptr = old->forward_to_atomic(old); | |
3294 if (forward_ptr == NULL) { | |
3295 // Forward-to-self succeeded. | |
3296 if (_evac_failure_closure != cl) { | |
3297 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
3298 assert(!_drain_in_progress, | |
3299 "Should only be true while someone holds the lock."); | |
3300 // Set the global evac-failure closure to the current thread's. | |
3301 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
3302 set_evac_failure_closure(cl); | |
3303 // Now do the common part. | |
3304 handle_evacuation_failure_common(old, m); | |
3305 // Reset to NULL. | |
3306 set_evac_failure_closure(NULL); | |
3307 } else { | |
3308 // The lock is already held, and this is recursive. | |
3309 assert(_drain_in_progress, "This should only be the recursive case."); | |
3310 handle_evacuation_failure_common(old, m); | |
3311 } | |
3312 return old; | |
3313 } else { | |
3314 // Someone else had a place to copy it. | |
3315 return forward_ptr; | |
3316 } | |
3317 } | |
3318 | |
3319 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
3320 set_evacuation_failed(true); | |
3321 | |
3322 preserve_mark_if_necessary(old, m); | |
3323 | |
3324 HeapRegion* r = heap_region_containing(old); | |
3325 if (!r->evacuation_failed()) { | |
3326 r->set_evacuation_failed(true); | |
751 | 3327 if (G1PrintRegions) { |
342 | 3328 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " |
3329 "["PTR_FORMAT","PTR_FORMAT")\n", | |
3330 r, r->bottom(), r->end()); | |
3331 } | |
3332 } | |
3333 | |
3334 push_on_evac_failure_scan_stack(old); | |
3335 | |
3336 if (!_drain_in_progress) { | |
3337 // prevent recursion in copy_to_survivor_space() | |
3338 _drain_in_progress = true; | |
3339 drain_evac_failure_scan_stack(); | |
3340 _drain_in_progress = false; | |
3341 } | |
3342 } | |
3343 | |
3344 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
3345 if (m != markOopDesc::prototype()) { | |
3346 if (_objs_with_preserved_marks == NULL) { | |
3347 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
3348 _objs_with_preserved_marks = | |
3349 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3350 _preserved_marks_of_objs = | |
3351 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
3352 } | |
3353 _objs_with_preserved_marks->push(obj); | |
3354 _preserved_marks_of_objs->push(m); | |
3355 } | |
3356 } | |
3357 | |
3358 // *** Parallel G1 Evacuation | |
3359 | |
3360 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
3361 size_t word_size) { | |
3362 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3363 // let the caller handle alloc failure | |
3364 if (alloc_region == NULL) return NULL; | |
3365 | |
3366 HeapWord* block = alloc_region->par_allocate(word_size); | |
3367 if (block == NULL) { | |
3368 MutexLockerEx x(par_alloc_during_gc_lock(), | |
3369 Mutex::_no_safepoint_check_flag); | |
3370 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
3371 } | |
3372 return block; | |
3373 } | |
3374 | |
545 | 3375 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
3376 bool par) { | |
3377 // Another thread might have obtained alloc_region for the given | |
3378 // purpose, and might be attempting to allocate in it, and might | |
3379 // succeed. Therefore, we can't do the "finalization" stuff on the | |
3380 // region below until we're sure the last allocation has happened. | |
3381 // We ensure this by allocating the remaining space with a garbage | |
3382 // object. | |
3383 if (par) par_allocate_remaining_space(alloc_region); | |
3384 // Now we can do the post-GC stuff on the region. | |
3385 alloc_region->note_end_of_copying(); | |
3386 g1_policy()->record_after_bytes(alloc_region->used()); | |
3387 } | |
3388 | |
342 | 3389 HeapWord* |
3390 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
3391 HeapRegion* alloc_region, | |
3392 bool par, | |
3393 size_t word_size) { | |
3394 HeapWord* block = NULL; | |
3395 // In the parallel case, a previous thread to obtain the lock may have | |
3396 // already assigned a new gc_alloc_region. | |
3397 if (alloc_region != _gc_alloc_regions[purpose]) { | |
3398 assert(par, "But should only happen in parallel case."); | |
3399 alloc_region = _gc_alloc_regions[purpose]; | |
3400 if (alloc_region == NULL) return NULL; | |
3401 block = alloc_region->par_allocate(word_size); | |
3402 if (block != NULL) return block; | |
3403 // Otherwise, continue; this new region is full, too. | |
3404 } | |
3405 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 3406 retire_alloc_region(alloc_region, par); |
342 | 3407 |
3408 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
3409 // Cannot allocate more regions for the given purpose. | |
3410 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
3411 // Is there an alternative? | |
3412 if (purpose != alt_purpose) { | |
3413 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
3414 // Has the alternative region not been aliased? | |
545 | 3415 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 3416 // Try to allocate in the alternative region. |
3417 if (par) { | |
3418 block = alt_region->par_allocate(word_size); | |
3419 } else { | |
3420 block = alt_region->allocate(word_size); | |
3421 } | |
3422 // Make an alias. | |
3423 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 3424 if (block != NULL) { |
3425 return block; | |
3426 } | |
3427 retire_alloc_region(alt_region, par); | |
342 | 3428 } |
3429 // Both the allocation region and the alternative one are full | |
3430 // and aliased, replace them with a new allocation region. | |
3431 purpose = alt_purpose; | |
3432 } else { | |
3433 set_gc_alloc_region(purpose, NULL); | |
3434 return NULL; | |
3435 } | |
3436 } | |
3437 | |
3438 // Now allocate a new region for allocation. | |
3439 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); | |
3440 | |
3441 // let the caller handle alloc failure | |
3442 if (alloc_region != NULL) { | |
3443 | |
3444 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3445 assert(alloc_region->saved_mark_at_top(), | |
3446 "Mark should have been saved already."); | |
3447 // We used to assert that the region was zero-filled here, but no | |
3448 // longer. | |
3449 | |
3450 // This must be done last: once it's installed, other regions may | |
3451 // allocate in it (without holding the lock.) | |
3452 set_gc_alloc_region(purpose, alloc_region); | |
3453 | |
3454 if (par) { | |
3455 block = alloc_region->par_allocate(word_size); | |
3456 } else { | |
3457 block = alloc_region->allocate(word_size); | |
3458 } | |
3459 // Caller handles alloc failure. | |
3460 } else { | |
3461 // This sets other apis using the same old alloc region to NULL, also. | |
3462 set_gc_alloc_region(purpose, NULL); | |
3463 } | |
3464 return block; // May be NULL. | |
3465 } | |
3466 | |
3467 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
3468 HeapWord* block = NULL; | |
3469 size_t free_words; | |
3470 do { | |
3471 free_words = r->free()/HeapWordSize; | |
3472 // If there's too little space, no one can allocate, so we're done. | |
3473 if (free_words < (size_t)oopDesc::header_size()) return; | |
3474 // Otherwise, try to claim it. | |
3475 block = r->par_allocate(free_words); | |
3476 } while (block == NULL); | |
481 | 3477 fill_with_object(block, free_words); |
342 | 3478 } |
3479 | |
3480 #define use_local_bitmaps 1 | |
3481 #define verify_local_bitmaps 0 | |
3482 | |
3483 #ifndef PRODUCT | |
3484 | |
3485 class GCLabBitMap; | |
3486 class GCLabBitMapClosure: public BitMapClosure { | |
3487 private: | |
3488 ConcurrentMark* _cm; | |
3489 GCLabBitMap* _bitmap; | |
3490 | |
3491 public: | |
3492 GCLabBitMapClosure(ConcurrentMark* cm, | |
3493 GCLabBitMap* bitmap) { | |
3494 _cm = cm; | |
3495 _bitmap = bitmap; | |
3496 } | |
3497 | |
3498 virtual bool do_bit(size_t offset); | |
3499 }; | |
3500 | |
3501 #endif // PRODUCT | |
3502 | |
3503 #define oop_buffer_length 256 | |
3504 | |
3505 class GCLabBitMap: public BitMap { | |
3506 private: | |
3507 ConcurrentMark* _cm; | |
3508 | |
3509 int _shifter; | |
3510 size_t _bitmap_word_covers_words; | |
3511 | |
3512 // beginning of the heap | |
3513 HeapWord* _heap_start; | |
3514 | |
3515 // this is the actual start of the GCLab | |
3516 HeapWord* _real_start_word; | |
3517 | |
3518 // this is the actual end of the GCLab | |
3519 HeapWord* _real_end_word; | |
3520 | |
3521 // this is the first word, possibly located before the actual start | |
3522 // of the GCLab, that corresponds to the first bit of the bitmap | |
3523 HeapWord* _start_word; | |
3524 | |
3525 // size of a GCLab in words | |
3526 size_t _gclab_word_size; | |
3527 | |
3528 static int shifter() { | |
3529 return MinObjAlignment - 1; | |
3530 } | |
3531 | |
3532 // how many heap words does a single bitmap word correspond to? | |
3533 static size_t bitmap_word_covers_words() { | |
3534 return BitsPerWord << shifter(); | |
3535 } | |
3536 | |
3537 static size_t gclab_word_size() { | |
751 | 3538 return G1ParallelGCAllocBufferSize / HeapWordSize; |
342 | 3539 } |
3540 | |
3541 static size_t bitmap_size_in_bits() { | |
3542 size_t bits_in_bitmap = gclab_word_size() >> shifter(); | |
3543 // We are going to ensure that the beginning of a word in this | |
3544 // bitmap also corresponds to the beginning of a word in the | |
3545 // global marking bitmap. To handle the case where a GCLab | |
3546 // starts from the middle of the bitmap, we need to add enough | |
3547 // space (i.e. up to a bitmap word) to ensure that we have | |
3548 // enough bits in the bitmap. | |
3549 return bits_in_bitmap + BitsPerWord - 1; | |
3550 } | |
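
A worked instance of this size calculation, under assumed values (a 64-bit VM with BitsPerWord == 64, MinObjAlignment of one heap word so shifter() is 0, and a made-up 1024-word PLAB; the real size comes from G1ParallelGCAllocBufferSize):

    #include <cstddef>
    #include <cstdio>

    const size_t BitsPerWordC = 64;   // 64-bit VM
    const size_t MinObjAlignC = 1;    // alignment in heap words
    const size_t ShifterC     = MinObjAlignC - 1;  // == 0
    const size_t GCLabWordsC  = 1024; // hypothetical PLAB size in words

    size_t bitmap_size_in_bits() {
      size_t bits = GCLabWordsC >> ShifterC;  // one bit per aligned heap word
      // Up to BitsPerWord-1 extra bits so that, after rounding _start_word down
      // to a bitmap-word boundary, the map still covers the whole GCLab.
      return bits + BitsPerWordC - 1;
    }

    int main() {
      // 1024 + 63 = 1087 bits -> 17 bitmap words instead of 16.
      printf("%zu bits, %zu words\n", bitmap_size_in_bits(),
             (bitmap_size_in_bits() + BitsPerWordC - 1) / BitsPerWordC);
      return 0;
    }
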
3551 public: | |
3552 GCLabBitMap(HeapWord* heap_start) | |
3553 : BitMap(bitmap_size_in_bits()), | |
3554 _cm(G1CollectedHeap::heap()->concurrent_mark()), | |
3555 _shifter(shifter()), | |
3556 _bitmap_word_covers_words(bitmap_word_covers_words()), | |
3557 _heap_start(heap_start), | |
3558 _gclab_word_size(gclab_word_size()), | |
3559 _real_start_word(NULL), | |
3560 _real_end_word(NULL), | |
3561 _start_word(NULL) | |
3562 { | |
3563 guarantee( size_in_words() >= bitmap_size_in_words(), | |
3564 "just making sure"); | |
3565 } | |
3566 | |
3567 inline unsigned heapWordToOffset(HeapWord* addr) { | |
3568 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter; | |
3569 assert(offset < size(), "offset should be within bounds"); | |
3570 return offset; | |
3571 } | |
3572 | |
3573 inline HeapWord* offsetToHeapWord(size_t offset) { | |
3574 HeapWord* addr = _start_word + (offset << _shifter); | |
3575 assert(_real_start_word <= addr && addr < _real_end_word, "invariant"); | |
3576 return addr; | |
3577 } | |
3578 | |
3579 bool fields_well_formed() { | |
3580 bool ret1 = (_real_start_word == NULL) && | |
3581 (_real_end_word == NULL) && | |
3582 (_start_word == NULL); | |
3583 if (ret1) | |
3584 return true; | |
3585 | |
3586 bool ret2 = _real_start_word >= _start_word && | |
3587 _start_word < _real_end_word && | |
3588 (_real_start_word + _gclab_word_size) == _real_end_word && | |
3589 (_start_word + _gclab_word_size + _bitmap_word_covers_words) | |
3590 > _real_end_word; | |
3591 return ret2; | |
3592 } | |
3593 | |
3594 inline bool mark(HeapWord* addr) { | |
3595 guarantee(use_local_bitmaps, "invariant"); | |
3596 assert(fields_well_formed(), "invariant"); | |
3597 | |
3598 if (addr >= _real_start_word && addr < _real_end_word) { | |
3599 assert(!isMarked(addr), "should not have already been marked"); | |
3600 | |
3601 // first mark it on the bitmap | |
3602 at_put(heapWordToOffset(addr), true); | |
3603 | |
3604 return true; | |
3605 } else { | |
3606 return false; | |
3607 } | |
3608 } | |
3609 | |
3610 inline bool isMarked(HeapWord* addr) { | |
3611 guarantee(use_local_bitmaps, "invariant"); | |
3612 assert(fields_well_formed(), "invariant"); | |
3613 | |
3614 return at(heapWordToOffset(addr)); | |
3615 } | |
3616 | |
3617 void set_buffer(HeapWord* start) { | |
3618 guarantee(use_local_bitmaps, "invariant"); | |
3619 clear(); | |
3620 | |
3621 assert(start != NULL, "invariant"); | |
3622 _real_start_word = start; | |
3623 _real_end_word = start + _gclab_word_size; | |
3624 | |
3625 size_t diff = | |
3626 pointer_delta(start, _heap_start) % _bitmap_word_covers_words; | |
3627 _start_word = start - diff; | |
3628 | |
3629 assert(fields_well_formed(), "invariant"); | |
3630 } | |
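
The modulo arithmetic above is what lets retire() union whole bitmap words into the global mark bitmap: _start_word is rounded down so that bit 0 of the local map coincides with a word boundary of the global map. A worked example with made-up word indexes (heap base at word 0, a GCLab starting at word 1000, 64 heap words per bitmap word):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t covers = 64;    // heap words covered per bitmap word
      size_t heap_start   = 0;     // word index of the heap base
      size_t gclab_start  = 1000;  // word index where this GCLab begins

      // Same computation as set_buffer(): round down to a bitmap-word boundary.
      size_t diff       = (gclab_start - heap_start) % covers;  // 1000 % 64 == 40
      size_t start_word = gclab_start - diff;                   // == 960

      // 960 is a multiple of 64, so bit 0 of the local bitmap coincides with
      // bit 0 of some word of the global bitmap; retire() can then OR whole words.
      assert(start_word % covers == 0);
      return 0;
    }
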
3631 | |
3632 #ifndef PRODUCT | |
3633 void verify() { | |
3634 // verify that the marks have been propagated | |
3635 GCLabBitMapClosure cl(_cm, this); | |
3636 iterate(&cl); | |
3637 } | |
3638 #endif // PRODUCT | |
3639 | |
3640 void retire() { | |
3641 guarantee(use_local_bitmaps, "invariant"); | |
3642 assert(fields_well_formed(), "invariant"); | |
3643 | |
3644 if (_start_word != NULL) { | |
3645 CMBitMap* mark_bitmap = _cm->nextMarkBitMap(); | |
3646 | |
3647 // this means that the bitmap was set up for the GCLab | |
3648 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant"); | |
3649 | |
3650 mark_bitmap->mostly_disjoint_range_union(this, | |
3651 0, // always start from the start of the bitmap | |
3652 _start_word, | |
3653 size_in_words()); | |
3654 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); | |
3655 | |
3656 #ifndef PRODUCT | |
3657 if (use_local_bitmaps && verify_local_bitmaps) | |
3658 verify(); | |
3659 #endif // PRODUCT | |
3660 } else { | |
3661 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant"); | |
3662 } | |
3663 } | |
3664 | |
3665 static size_t bitmap_size_in_words() { | |
3666 return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord; | |
3667 } | |
3668 }; | |
3669 | |
3670 #ifndef PRODUCT | |
3671 | |
3672 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
3673 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
3674 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
3675 return true; | |
3676 } | |
3677 | |
3678 #endif // PRODUCT | |
3679 | |
3680 class G1ParGCAllocBuffer: public ParGCAllocBuffer { | |
3681 private: | |
3682 bool _retired; | |
3683 bool _during_marking; | |
3684 GCLabBitMap _bitmap; | |
3685 | |
3686 public: | |
3687 G1ParGCAllocBuffer() : | |
751 | 3688 ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize), |
342 | 3689 _during_marking(G1CollectedHeap::heap()->mark_in_progress()), |
3690 _bitmap(G1CollectedHeap::heap()->reserved_region().start()), | |
3691 _retired(false) | |
3692 { } | |
3693 | |
3694 inline bool mark(HeapWord* addr) { | |
3695 guarantee(use_local_bitmaps, "invariant"); | |
3696 assert(_during_marking, "invariant"); | |
3697 return _bitmap.mark(addr); | |
3698 } | |
3699 | |
3700 inline void set_buf(HeapWord* buf) { | |
3701 if (use_local_bitmaps && _during_marking) | |
3702 _bitmap.set_buffer(buf); | |
3703 ParGCAllocBuffer::set_buf(buf); | |
3704 _retired = false; | |
3705 } | |
3706 | |
3707 inline void retire(bool end_of_gc, bool retain) { | |
3708 if (_retired) | |
3709 return; | |
3710 if (use_local_bitmaps && _during_marking) { | |
3711 _bitmap.retire(); | |
3712 } | |
3713 ParGCAllocBuffer::retire(end_of_gc, retain); | |
3714 _retired = true; | |
3715 } | |
3716 }; | |
3717 | |
3718 | |
3719 class G1ParScanThreadState : public StackObj { | |
3720 protected: | |
3721 G1CollectedHeap* _g1h; | |
3722 RefToScanQueue* _refs; | |
616 | 3723 DirtyCardQueue _dcq; |
3724 CardTableModRefBS* _ct_bs; |
3725 G1RemSet* _g1_rem; |
342 | 3726 |
3727 typedef GrowableArray<oop*> OverflowQueue; | |
3728 OverflowQueue* _overflowed_refs; | |
3729 | |
3730 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount]; | |
545 | 3731 ageTable _age_table; |
342 | 3732 |
3733 size_t _alloc_buffer_waste; | |
3734 size_t _undo_waste; | |
3735 | |
3736 OopsInHeapRegionClosure* _evac_failure_cl; | |
3737 G1ParScanHeapEvacClosure* _evac_cl; | |
3738 G1ParScanPartialArrayClosure* _partial_scan_cl; | |
3739 | |
3740 int _hash_seed; | |
3741 int _queue_num; | |
3742 | |
3743 int _term_attempts; | |
3744 #if G1_DETAILED_STATS | |
3745 int _pushes, _pops, _steals, _steal_attempts; | |
3746 int _overflow_pushes; | |
3747 #endif | |
3748 | |
3749 double _start; | |
3750 double _start_strong_roots; | |
3751 double _strong_roots_time; | |
3752 double _start_term; | |
3753 double _term_time; | |
3754 | |
3755 // Map from young-age-index (0 == not young, 1 is youngest) to | |
3756 // surviving words. base is what we get back from the malloc call | |
3757 size_t* _surviving_young_words_base; | |
3758 // this points into the array, as we use the first few entries for padding | |
3759 size_t* _surviving_young_words; | |
3760 | |
3761 #define PADDING_ELEM_NUM (64 / sizeof(size_t)) | |
3762 | |
3763 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } | |
3764 | |
3765 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } | |
3766 | |
616 | 3767 DirtyCardQueue& dirty_card_queue() { return _dcq; } |
3768 CardTableModRefBS* ctbs() { return _ct_bs; } |
3769 |
3770 void immediate_rs_update(HeapRegion* from, oop* p, int tid) { |
637 | 3771 if (!from->is_survivor()) { |
3772 _g1_rem->par_write_ref(from, p, tid); |
3773 } |
616 | 3774 } |
3775 |
3776 void deferred_rs_update(HeapRegion* from, oop* p, int tid) { |
3777 // If the new value of the field points into the same region or |
3778 // into the to-space, we don't need to include it in the RSet updates. |
3779 if (!from->is_in_reserved(*p) && !from->is_survivor()) { |
3780 size_t card_index = ctbs()->index_for(p); |
3781 // If the card hasn't been added to the buffer yet, do it now. |
3782 if (ctbs()->mark_card_deferred(card_index)) { |
3783 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index)); |
3784 } |
3785 } |
3786 } |
3787 |
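
The point of mark_card_deferred() above is deduplication: however many cross-region references land on the same card during the pause, the card is enqueued once, and the queued cards are redirtied and handed to the refinement machinery after the pause (see the G1DeferredRSUpdate block in evacuate_collection_set() below). A toy sketch of the defer-and-dedup pattern, not the real card table API:

    #include <cstdint>
    #include <vector>

    const int kDeferred = 1;

    struct ToyCardTable {
      std::vector<uint8_t> flags;  // one flag byte per card
      ToyCardTable(size_t cards) : flags(cards, 0) {}
      // Analogous to mark_card_deferred(): true only for the first marker,
      // so each card is enqueued at most once per pause.
      bool mark_card_deferred(size_t idx) {
        if (flags[idx] & kDeferred) return false;
        flags[idx] |= kDeferred;
        return true;
      }
    };

    int main() {
      ToyCardTable ct(1024);
      std::vector<size_t> dirty_card_queue;  // stand-in for the per-thread DCQ
      size_t card = 17;                      // card covering some updated field

      for (int i = 0; i < 3; i++)            // three writes to the same card...
        if (ct.mark_card_deferred(card))
          dirty_card_queue.push_back(card);  // ...yield a single queue entry

      return dirty_card_queue.size() == 1 ? 0 : 1;
    }
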
342 | 3788 public: |
3789 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) | |
3790 : _g1h(g1h), | |
3791 _refs(g1h->task_queue(queue_num)), | |
616 | 3792 _dcq(&g1h->dirty_card_queue_set()), |
3793 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
3794 _g1_rem(g1h->g1_rem_set()), |
342 | 3795 _hash_seed(17), _queue_num(queue_num), |
3796 _term_attempts(0), | |
545 | 3797 _age_table(false), |
342 | 3798 #if G1_DETAILED_STATS |
3799 _pushes(0), _pops(0), _steals(0), | |
3800 _steal_attempts(0), _overflow_pushes(0), | |
3801 #endif | |
3802 _strong_roots_time(0), _term_time(0), | |
3803 _alloc_buffer_waste(0), _undo_waste(0) | |
3804 { | |
3805 // we allocate G1YoungSurvRateNumRegions plus one entries, since | |
3806 // we "sacrifice" entry 0 to keep track of surviving bytes for | |
3807 // non-young regions (where the age is -1) | |
3808 // We also add a few elements at the beginning and at the end in | |
3809 // an attempt to eliminate cache contention | |
3810 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); | |
3811 size_t array_length = PADDING_ELEM_NUM + | |
3812 real_length + | |
3813 PADDING_ELEM_NUM; | |
3814 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3815 if (_surviving_young_words_base == NULL) | |
3816 vm_exit_out_of_memory(array_length * sizeof(size_t), | |
3817 "Not enough space for young surv histo."); | |
3818 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; | |
3819 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); | |
3820 | |
3821 _overflowed_refs = new OverflowQueue(10); | |
3822 | |
3823 _start = os::elapsedTime(); | |
3824 } | |
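
A sketch of the resulting array layout, assuming size_t is 8 bytes so PADDING_ELEM_NUM is 8 entries, i.e. one 64-byte cache line on each side; without the pads, the per-thread arrays of different workers could share cache lines and ping-pong during the parallel phase (young_cset_length and the index values are made up):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    int main() {
      const size_t PAD = 64 / sizeof(size_t);        // 8 entries == one cache line
      size_t young_cset_length = 5;                  // hypothetical
      size_t real_length  = 1 + young_cset_length;   // slot 0 is for non-young regions
      size_t array_length = PAD + real_length + PAD; // pad both ends

      size_t* base      = (size_t*)malloc(array_length * sizeof(size_t));
      size_t* surviving = base + PAD;  // what _surviving_young_words points at
      memset(surviving, 0, real_length * sizeof(size_t));

      // A non-young region (cset age index -1) is accounted in slot 0; a young
      // region with young_index_in_cset() == i goes to slot i + 1.
      int young_index_in_cset = 2;                   // hypothetical
      surviving[young_index_in_cset + 1] += 100;     // 100 surviving words

      free(base);
      return 0;
    }
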
3825 | |
3826 ~G1ParScanThreadState() { | |
3827 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); | |
3828 } | |
3829 | |
3830 RefToScanQueue* refs() { return _refs; } | |
3831 OverflowQueue* overflowed_refs() { return _overflowed_refs; } | |
545 | 3832 ageTable* age_table() { return &_age_table; } |
3833 | |
3834 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { | |
342 | 3835 return &_alloc_buffers[purpose]; |
3836 } | |
3837 | |
3838 size_t alloc_buffer_waste() { return _alloc_buffer_waste; } | |
3839 size_t undo_waste() { return _undo_waste; } | |
3840 | |
3841 void push_on_queue(oop* ref) { | |
526 | 3842 assert(ref != NULL, "invariant"); |
3843 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant"); | |
3844 | |
342 | 3845 if (!refs()->push(ref)) { |
3846 overflowed_refs()->push(ref); | |
3847 IF_G1_DETAILED_STATS(note_overflow_push()); | |
3848 } else { | |
3849 IF_G1_DETAILED_STATS(note_push()); | |
3850 } | |
3851 } | |
3852 | |
3853 void pop_from_queue(oop*& ref) { | |
3854 if (!refs()->pop_local(ref)) { | |
3855 ref = NULL; | |
3856 } else { | |
526 | 3857 assert(ref != NULL, "invariant"); |
3858 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), | |
3859 "invariant"); | |
3860 | |
342 | 3861 IF_G1_DETAILED_STATS(note_pop()); |
3862 } | |
3863 } | |
3864 | |
3865 void pop_from_overflow_queue(oop*& ref) { | |
3866 ref = overflowed_refs()->pop(); | |
3867 } | |
3868 | |
3869 int refs_to_scan() { return refs()->size(); } | |
3870 int overflowed_refs_to_scan() { return overflowed_refs()->length(); } | |
3871 | |
616 | 3872 void update_rs(HeapRegion* from, oop* p, int tid) { |
3873 if (G1DeferredRSUpdate) { |
3874 deferred_rs_update(from, p, tid); |
3875 } else { |
3876 immediate_rs_update(from, p, tid); |
3877 } |
3878 } |
3879 |
342 | 3880 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { |
3881 | |
3882 HeapWord* obj = NULL; | |
3883 if (word_sz * 100 < | |
751 | 3884 (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) * |
342 | 3885 ParallelGCBufferWastePct) { |
3886 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); | |
3887 add_to_alloc_buffer_waste(alloc_buf->words_remaining()); | |
3888 alloc_buf->retire(false, false); | |
3889 | |
3890 HeapWord* buf = | |
751 | 3891 _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize); |
342 | 3892 if (buf == NULL) return NULL; // Let caller handle allocation failure. |
3893 // Otherwise. | |
3894 alloc_buf->set_buf(buf); | |
3895 | |
3896 obj = alloc_buf->allocate(word_sz); | |
3897 assert(obj != NULL, "buffer was definitely big enough..."); | |
526 | 3898 } else { |
342 | 3899 obj = _g1h->par_allocate_during_gc(purpose, word_sz); |
3900 } | |
3901 return obj; | |
3902 } | |
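
The percentage test above is the PLAB retire policy: retiring throws away the buffer's unused tail (counted as waste), so it is only worth doing when the incoming object is small relative to the buffer, while bigger objects go straight to the shared region allocator. A sketch of the decision rule with made-up numbers (plab_words and waste_pct stand in for G1ParallelGCAllocBufferSize/HeapWordSize and ParallelGCBufferWastePct):

    #include <cstddef>
    #include <cstdio>

    const size_t plab_words = 1024;  // hypothetical PLAB size in words
    const size_t waste_pct  = 10;    // refill only if request < 10% of the PLAB

    // Mirrors the test in allocate_slow(): refill only for "small" requests.
    bool refill_plab_for(size_t word_sz) {
      return word_sz * 100 < plab_words * waste_pct;
    }

    int main() {
      printf("50-word obj : %s\n", refill_plab_for(50)  ? "retire+refill PLAB"
                                                        : "allocate directly");
      printf("500-word obj: %s\n", refill_plab_for(500) ? "retire+refill PLAB"
                                                        : "allocate directly");
      // 50*100  = 5000  < 10240 -> refill; at most ~10% of a PLAB is ever wasted.
      // 500*100 = 50000 >= 10240 -> direct allocation; the current PLAB is kept.
      return 0;
    }
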
3903 | |
3904 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { | |
3905 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); | |
3906 if (obj != NULL) return obj; | |
3907 return allocate_slow(purpose, word_sz); | |
3908 } | |
3909 | |
3910 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { | |
3911 if (alloc_buffer(purpose)->contains(obj)) { | |
3912 guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1), | |
3913 "should contain whole object"); | |
3914 alloc_buffer(purpose)->undo_allocation(obj, word_sz); | |
481 | 3915 } else { |
3916 CollectedHeap::fill_with_object(obj, word_sz); |
342 | 3917 add_to_undo_waste(word_sz); |
3918 } | |
3919 } | |
3920 | |
3921 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { | |
3922 _evac_failure_cl = evac_failure_cl; | |
3923 } | |
3924 OopsInHeapRegionClosure* evac_failure_closure() { | |
3925 return _evac_failure_cl; | |
3926 } | |
3927 | |
3928 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) { | |
3929 _evac_cl = evac_cl; | |
3930 } | |
3931 | |
3932 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) { | |
3933 _partial_scan_cl = partial_scan_cl; | |
3934 } | |
3935 | |
3936 int* hash_seed() { return &_hash_seed; } | |
3937 int queue_num() { return _queue_num; } | |
3938 | |
3939 int term_attempts() { return _term_attempts; } | |
3940 void note_term_attempt() { _term_attempts++; } | |
3941 | |
3942 #if G1_DETAILED_STATS | |
3943 int pushes() { return _pushes; } | |
3944 int pops() { return _pops; } | |
3945 int steals() { return _steals; } | |
3946 int steal_attempts() { return _steal_attempts; } | |
3947 int overflow_pushes() { return _overflow_pushes; } | |
3948 | |
3949 void note_push() { _pushes++; } | |
3950 void note_pop() { _pops++; } | |
3951 void note_steal() { _steals++; } | |
3952 void note_steal_attempt() { _steal_attempts++; } | |
3953 void note_overflow_push() { _overflow_pushes++; } | |
3954 #endif | |
3955 | |
3956 void start_strong_roots() { | |
3957 _start_strong_roots = os::elapsedTime(); | |
3958 } | |
3959 void end_strong_roots() { | |
3960 _strong_roots_time += (os::elapsedTime() - _start_strong_roots); | |
3961 } | |
3962 double strong_roots_time() { return _strong_roots_time; } | |
3963 | |
3964 void start_term_time() { | |
3965 note_term_attempt(); | |
3966 _start_term = os::elapsedTime(); | |
3967 } | |
3968 void end_term_time() { | |
3969 _term_time += (os::elapsedTime() - _start_term); | |
3970 } | |
3971 double term_time() { return _term_time; } | |
3972 | |
3973 double elapsed() { | |
3974 return os::elapsedTime() - _start; | |
3975 } | |
3976 | |
3977 size_t* surviving_young_words() { | |
3978 // We add one to hide entry 0, which accumulates surviving words for | |
3979 // age -1 regions (i.e. non-young ones) | |
3980 return _surviving_young_words; | |
3981 } | |
3982 | |
3983 void retire_alloc_buffers() { | |
3984 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3985 size_t waste = _alloc_buffers[ap].words_remaining(); | |
3986 add_to_alloc_buffer_waste(waste); | |
3987 _alloc_buffers[ap].retire(true, false); | |
3988 } | |
3989 } | |
3990 | |
526 | 3991 private: |
3992 void deal_with_reference(oop* ref_to_scan) { | |
3993 if (has_partial_array_mask(ref_to_scan)) { | |
3994 _partial_scan_cl->do_oop_nv(ref_to_scan); | |
3995 } else { | |
3996 // Note: we can use "raw" versions of "region_containing" because | |
3997 // "obj_to_scan" is definitely in the heap, and is not in a | |
3998 // humongous region. | |
3999 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); | |
4000 _evac_cl->set_region(r); | |
4001 _evac_cl->do_oop_nv(ref_to_scan); | |
4002 } | |
4003 } | |
4004 | |
4005 public: | |
342 | 4006 void trim_queue() { |
526 | 4007 // The loop body is written out twice: first to drain the overflow |
4008 // queue, then to drain the task queue. This is better than | |
4009 // having a single loop, which checks both conditions and, inside | |
4010 // it, either pops the overflow queue or the task queue, as each | |
4011 // loop is tighter. Also, the decision to drain the overflow queue | |
4012 // first is not arbitrary, as the overflow queue is not visible | |
4013 // to the other workers, whereas the task queue is. So, we want to | |
4014 // drain the "invisible" entries first, while allowing the other | |
4015 // workers to potentially steal the "visible" entries. | |
4016 | |
342 | 4017 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) { |
526 | 4018 while (overflowed_refs_to_scan() > 0) { |
4019 oop *ref_to_scan = NULL; | |
342 | 4020 pop_from_overflow_queue(ref_to_scan); |
526 | 4021 assert(ref_to_scan != NULL, "invariant"); |
4022 // We shouldn't have pushed it on the queue if it was not | |
4023 // pointing into the CSet. | |
4024 assert(ref_to_scan != NULL, "sanity"); | |
4025 assert(has_partial_array_mask(ref_to_scan) || | |
4026 _g1h->obj_in_cs(*ref_to_scan), "sanity"); | |
4027 | |
4028 deal_with_reference(ref_to_scan); | |
342 | 4029 } |
526 | 4030 |
4031 while (refs_to_scan() > 0) { | |
4032 oop *ref_to_scan = NULL; | |
4033 pop_from_queue(ref_to_scan); | |
4034 | |
4035 if (ref_to_scan != NULL) { | |
4036 // We shouldn't have pushed it on the queue if it was not | |
4037 // pointing into the CSet. | |
4038 assert(has_partial_array_mask(ref_to_scan) || | |
4039 _g1h->obj_in_cs(*ref_to_scan), "sanity"); | |
4040 | |
4041 deal_with_reference(ref_to_scan); | |
342 | 4042 } |
4043 } | |
4044 } | |
4045 } | |
4046 }; | |
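
A toy reduction of trim_queue()'s two-phase drain, using standard containers in place of the real work-stealing deque; the real process() may push further entries, which is why the outer loop re-tests both queues:

    #include <deque>
    #include <vector>

    struct Task { int payload; };
    void process(Task) { /* scan the reference */ }

    void trim(std::vector<Task>& overflow /* private */,
              std::deque<Task>& queue     /* stealable */) {
      while (!overflow.empty() || !queue.empty()) {
        while (!overflow.empty()) {  // invisible to thieves: only we can run these
          Task t = overflow.back(); overflow.pop_back();
          process(t);
        }
        while (!queue.empty()) {     // visible: other workers may steal meanwhile
          Task t = queue.front(); queue.pop_front();
          process(t);
        }
      }
    }
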
4047 | |
4048 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : | |
4049 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
4050 _par_scan_state(par_scan_state) { } | |
4051 | |
4052 // This closure is applied to the fields of the objects that have just been copied. | |
4053 // Should probably be made inline and moved in g1OopClosures.inline.hpp. | |
4054 void G1ParScanClosure::do_oop_nv(oop* p) { | |
4055 oop obj = *p; | |
526 | 4056 |
342 | 4057 if (obj != NULL) { |
526 | 4058 if (_g1->in_cset_fast_test(obj)) { |
4059 // We're not going to even bother checking whether the object is | |
4060 // already forwarded or not, as this usually causes an immediate | |
4061 // stall. We'll try to prefetch the object (for write, given that | |
4062 // we might need to install the forwarding reference) and we'll | |
4063 // get back to it when we pop it from the queue. | |
4064 Prefetch::write(obj->mark_addr(), 0); | |
4065 Prefetch::read(obj->mark_addr(), (HeapWordSize*2)); | |
4066 | |
4067 // slightly paranoid test; I'm trying to catch potential | |
4068 // problems before we go into push_on_queue to know where the | |
4069 // problem is coming from | |
4070 assert(obj == *p, "the value of *p should not have changed"); | |
4071 _par_scan_state->push_on_queue(p); | |
4072 } else { | |
616 | 4073 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 4074 } |
4075 } | |
4076 } | |
4077 | |
4078 void G1ParCopyHelper::mark_forwardee(oop* p) { | |
4079 // This is called _after_ do_oop_work has been called, hence after | |
4080 // the object has been relocated to its new location and *p points | |
4081 // to its new location. | |
4082 | |
4083 oop thisOop = *p; | |
4084 if (thisOop != NULL) { | |
4085 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)), | |
4086 "shouldn't still be in the CSet if evacuation didn't fail."); | |
4087 HeapWord* addr = (HeapWord*)thisOop; | |
4088 if (_g1->is_in_g1_reserved(addr)) | |
4089 _cm->grayRoot(oop(addr)); | |
4090 } | |
4091 } | |
4092 | |
4093 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
4094 size_t word_sz = old->size(); | |
4095 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
4096 // +1 to make the -1 indexes valid... | |
4097 int young_index = from_region->young_index_in_cset()+1; | |
4098 assert( (from_region->is_young() && young_index > 0) || | |
4099 (!from_region->is_young() && young_index == 0), "invariant" ); | |
4100 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
4101 markOop m = old->mark(); | |
545 | 4102 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
4103 : m->age(); | |
4104 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 4105 word_sz); |
4106 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
4107 oop obj = oop(obj_ptr); | |
4108 | |
4109 if (obj_ptr == NULL) { | |
4110 // This will either forward-to-self, or detect that someone else has | |
4111 // installed a forwarding pointer. | |
4112 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
4113 return _g1->handle_evacuation_failure_par(cl, old); | |
4114 } | |
4115 | |
526 | 4116 // We're going to allocate linearly, so might as well prefetch ahead. |
4117 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
4118 | |
342 | 4119 oop forward_ptr = old->forward_to_atomic(obj); |
4120 if (forward_ptr == NULL) { | |
4121 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 4122 if (g1p->track_object_age(alloc_purpose)) { |
4123 // We could simply do obj->incr_age(). However, this causes a | |
4124 // performance issue. obj->incr_age() will first check whether | |
4125 // the object has a displaced mark by checking its mark word; | |
4126 // getting the mark word from the new location of the object | |
4127 // stalls. So, given that we already have the mark word and we | |
4128 // are about to install it anyway, it's better to increase the | |
4129 // age on the mark word, when the object does not have a | |
4130 // displaced mark word. We're not expecting many objects to have | |
4131 // a displaced marked word, so that case is not optimized | |
4132 // further (it could be...) and we simply call obj->incr_age(). | |
4133 | |
4134 if (m->has_displaced_mark_helper()) { | |
4135 // in this case, we have to install the mark word first, | |
4136 // otherwise obj looks to be forwarded (the old mark word, | |
4137 // which contains the forward pointer, was copied) | |
4138 obj->set_mark(m); | |
4139 obj->incr_age(); | |
4140 } else { | |
4141 m = m->incr_age(); | |
545 | 4142 obj->set_mark(m); |
526 | 4143 } |
545 | 4144 _par_scan_state->age_table()->add(obj, word_sz); |
4145 } else { | |
4146 obj->set_mark(m); | |
526 | 4147 } |
4148 | |
342 | 4149 // preserve "next" mark bit |
4150 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
4151 if (!use_local_bitmaps || | |
4152 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
4153 // if we couldn't mark it on the local bitmap (this happens when | |
4154 // the object was not allocated in the GCLab), we have to bite | |
4155 // the bullet and do the standard parallel mark | |
4156 _cm->markAndGrayObjectIfNecessary(obj); | |
4157 } | |
4158 #if 1 | |
4159 if (_g1->isMarkedNext(old)) { | |
4160 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
4161 } | |
4162 #endif | |
4163 } | |
4164 | |
4165 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
4166 surv_young_words[young_index] += word_sz; | |
4167 | |
4168 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
4169 arrayOop(old)->set_length(0); | |
526 | 4170 _par_scan_state->push_on_queue(set_partial_array_mask(old)); |
342 | 4171 } else { |
526 | 4172 // No point in using the slower heap_region_containing() method, |
4173 // given that we know obj is in the heap. | |
4174 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 4175 obj->oop_iterate_backwards(_scanner); |
4176 } | |
4177 } else { | |
4178 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
4179 obj = forward_ptr; | |
4180 } | |
4181 return obj; | |
4182 } | |
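
forward_to_atomic() above is the arbitration point when several workers race to evacuate the same object: each speculatively allocates a copy, exactly one CASes a forwarding pointer into the old mark word (and gets NULL back as the winner), and every loser undoes its allocation and adopts the winner's copy. A standalone sketch of that protocol with std::atomic standing in for the mark word (ToyObj and evacuate are illustrative names):

    #include <atomic>

    struct ToyObj {
      std::atomic<ToyObj*> forwardee{nullptr};
    };

    // Analogue of forward_to_atomic(): returns nullptr to the single winner,
    // and the winner's copy to everyone else.
    ToyObj* forward_to_atomic(ToyObj* old_obj, ToyObj* my_copy) {
      ToyObj* expected = nullptr;
      if (old_obj->forwardee.compare_exchange_strong(expected, my_copy))
        return nullptr;  // we installed the pointer: our copy is the real one
      return expected;   // someone beat us: use their copy
    }

    ToyObj* evacuate(ToyObj* old_obj, ToyObj* speculative_copy) {
      ToyObj* winner = forward_to_atomic(old_obj, speculative_copy);
      if (winner == nullptr) {
        // only the winner copies the payload and scans the fields
        return speculative_copy;
      }
      // lost the race: the real code calls undo_allocation() here
      return winner;
    }
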
4183 | |
526 | 4184 template<bool do_gen_barrier, G1Barrier barrier, |
4185 bool do_mark_forwardee, bool skip_cset_test> | |
4186 void G1ParCopyClosure<do_gen_barrier, barrier, | |
4187 do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) { | |
342 | 4188 oop obj = *p; |
4189 assert(barrier != G1BarrierRS || obj != NULL, | |
4190 "Precondition: G1BarrierRS implies obj is nonNull"); | |
4191 | |
526 | 4192 // The only time we skip the cset test is when we're scanning |
4193 // references popped from the queue. And we only push on the queue | |
4194 // references that we know point into the cset, so no point in | |
4195 // checking again. But we'll leave an assert here for peace of mind. | |
4196 assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant"); | |
4197 | |
4198 // here the null check is implicit in the cset_fast_test() test | |
4199 if (skip_cset_test || _g1->in_cset_fast_test(obj)) { | |
342 | 4200 #if G1_REM_SET_LOGGING |
526 | 4201 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
4202 "into CS.", p, (void*) obj); | |
342 | 4203 #endif |
526 | 4204 if (obj->is_forwarded()) { |
4205 *p = obj->forwardee(); | |
4206 } else { | |
4207 *p = copy_to_survivor_space(obj); | |
342 | 4208 } |
526 | 4209 // When scanning the RS, we only care about objs in CS. |
4210 if (barrier == G1BarrierRS) { | |
616 | 4211 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
342 | 4212 } |
526 | 4213 } |
4214 | |
4215 // When scanning moved objs, must look at all oops. | |
4216 if (barrier == G1BarrierEvac && obj != NULL) { | |
616 | 4217 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
526 | 4218 } |
4219 | |
4220 if (do_gen_barrier && obj != NULL) { | |
4221 par_do_barrier(p); | |
4222 } | |
4223 } | |
4224 | |
4225 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p); | |
4226 | |
4227 template<class T> void G1ParScanPartialArrayClosure::process_array_chunk( | |
342 | 4228 oop obj, int start, int end) { |
4229 // process our set of indices (include header in first chunk) | |
4230 assert(start < end, "invariant"); | |
4231 T* const base = (T*)objArrayOop(obj)->base(); | |
526 | 4232 T* const start_addr = (start == 0) ? (T*) obj : base + start; |
342 | 4233 T* const end_addr = base + end; |
4234 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); | |
4235 _scanner.set_region(_g1->heap_region_containing(obj)); | |
4236 obj->oop_iterate(&_scanner, mr); | |
4237 } | |
4238 | |
4239 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) { | |
4240 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops"); | |
526 | 4241 assert(has_partial_array_mask(p), "invariant"); |
4242 oop old = clear_partial_array_mask(p); | |
342 | 4243 assert(old->is_objArray(), "must be obj array"); |
4244 assert(old->is_forwarded(), "must be forwarded"); | |
4245 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
4246 | |
4247 objArrayOop obj = objArrayOop(old->forwardee()); | |
4248 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
4249 // Process ParGCArrayScanChunk elements now | |
4250 // and push the remainder back onto queue | |
4251 int start = arrayOop(old)->length(); | |
4252 int end = obj->length(); | |
4253 int remainder = end - start; | |
4254 assert(start <= end, "just checking"); | |
4255 if (remainder > 2 * ParGCArrayScanChunk) { | |
4256 // Test above combines last partial chunk with a full chunk | |
4257 end = start + ParGCArrayScanChunk; | |
4258 arrayOop(old)->set_length(end); | |
4259 // Push remainder. | |
526 | 4260 _par_scan_state->push_on_queue(set_partial_array_mask(old)); |
342 | 4261 } else { |
4262 // Restore length so that the heap remains parsable in | |
4263 // case of evacuation failure. | |
4264 arrayOop(old)->set_length(end); | |
4265 } | |
4266 | |
4267 // process our set of indices (include header in first chunk) | |
4268 process_array_chunk<oop>(obj, start, end); | |
4269 } | |
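
The chunking above keeps one worker from scanning a huge object array alone: the from-space copy's length field is reused as a cursor, one chunk is scanned per step, and the masked old pointer goes back on the queue (where it can be stolen) while more than two chunks remain. A toy version of the stepping logic, without the pointer masking or forwarding:

    #include <cstdio>
    #include <vector>

    const int ChunkSize = 4;  // stands in for ParGCArrayScanChunk

    struct ToyArray {
      int cursor;       // plays the role of arrayOop(old)->length()
      int real_length;  // obj->length() on the to-space copy
    };

    void scan_elements(int start, int end) { printf("scan [%d,%d)\n", start, end); }

    void scan_step(ToyArray* a, std::vector<ToyArray*>& queue) {
      int start = a->cursor;
      int end   = a->real_length;
      if (end - start > 2 * ChunkSize) {  // last partial chunk rides with a full one
        end = start + ChunkSize;
        a->cursor = end;
        queue.push_back(a);               // remainder can be scanned (or stolen) later
      } else {
        a->cursor = end;                  // restore full length: heap stays parsable
      }
      scan_elements(start, end);
    }

    int main() {
      std::vector<ToyArray*> queue;
      ToyArray a = {0, 13};
      scan_step(&a, queue);               // scans [0,4) and re-queues the rest
      while (!queue.empty()) {
        ToyArray* t = queue.back();
        queue.pop_back();
        scan_step(t, queue);              // scans [4,8), then [8,13)
      }
      return 0;
    }
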
4270 | |
4271 int G1ScanAndBalanceClosure::_nq = 0; | |
4272 | |
4273 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
4274 protected: | |
4275 G1CollectedHeap* _g1h; | |
4276 G1ParScanThreadState* _par_scan_state; | |
4277 RefToScanQueueSet* _queues; | |
4278 ParallelTaskTerminator* _terminator; | |
4279 | |
4280 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
4281 RefToScanQueueSet* queues() { return _queues; } | |
4282 ParallelTaskTerminator* terminator() { return _terminator; } | |
4283 | |
4284 public: | |
4285 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
4286 G1ParScanThreadState* par_scan_state, | |
4287 RefToScanQueueSet* queues, | |
4288 ParallelTaskTerminator* terminator) | |
4289 : _g1h(g1h), _par_scan_state(par_scan_state), | |
4290 _queues(queues), _terminator(terminator) {} | |
4291 | |
4292 void do_void() { | |
4293 G1ParScanThreadState* pss = par_scan_state(); | |
4294 while (true) { | |
4295 oop* ref_to_scan; | |
4296 pss->trim_queue(); | |
4297 IF_G1_DETAILED_STATS(pss->note_steal_attempt()); | |
4298 if (queues()->steal(pss->queue_num(), | |
4299 pss->hash_seed(), | |
4300 ref_to_scan)) { | |
4301 IF_G1_DETAILED_STATS(pss->note_steal()); | |
526 | 4302 |
4303 // slightly paranoid tests; I'm trying to catch potential | |
4304 // problems before we go into push_on_queue to know where the | |
4305 // problem is coming from | |
4306 assert(ref_to_scan != NULL, "invariant"); | |
4307 assert(has_partial_array_mask(ref_to_scan) || | |
4308 _g1h->obj_in_cs(*ref_to_scan), "invariant"); | |
342 | 4309 pss->push_on_queue(ref_to_scan); |
4310 continue; | |
4311 } | |
4312 pss->start_term_time(); | |
4313 if (terminator()->offer_termination()) break; | |
4314 pss->end_term_time(); | |
4315 } | |
4316 pss->end_term_time(); | |
4317 pss->retire_alloc_buffers(); | |
4318 } | |
4319 }; | |
4320 | |
4321 class G1ParTask : public AbstractGangTask { | |
4322 protected: | |
4323 G1CollectedHeap* _g1h; | |
4324 RefToScanQueueSet *_queues; | |
4325 ParallelTaskTerminator _terminator; | |
4326 | |
4327 Mutex _stats_lock; | |
4328 Mutex* stats_lock() { return &_stats_lock; } | |
4329 | |
4330 size_t getNCards() { | |
4331 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
4332 / G1BlockOffsetSharedArray::N_bytes; | |
4333 } | |
4334 | |
4335 public: | |
4336 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
4337 : AbstractGangTask("G1 collection"), | |
4338 _g1h(g1h), | |
4339 _queues(task_queues), | |
4340 _terminator(workers, _queues), | |
4341 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true) | |
4342 {} | |
4343 | |
4344 RefToScanQueueSet* queues() { return _queues; } | |
4345 | |
4346 RefToScanQueue *work_queue(int i) { | |
4347 return queues()->queue(i); | |
4348 } | |
4349 | |
4350 void work(int i) { | |
4351 ResourceMark rm; | |
4352 HandleMark hm; | |
4353 | |
526 | 4354 G1ParScanThreadState pss(_g1h, i); |
4355 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
4356 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
4357 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 4358 |
4359 pss.set_evac_closure(&scan_evac_cl); | |
4360 pss.set_evac_failure_closure(&evac_failure_cl); | |
4361 pss.set_partial_scan_closure(&partial_scan_cl); | |
4362 | |
4363 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
4364 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
4365 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
616 | 4366 |
342 | 4367 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
4368 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
4369 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
4370 | |
4371 OopsInHeapRegionClosure *scan_root_cl; | |
4372 OopsInHeapRegionClosure *scan_perm_cl; | |
4373 OopsInHeapRegionClosure *scan_so_cl; | |
4374 | |
4375 if (_g1h->g1_policy()->should_initiate_conc_mark()) { | |
4376 scan_root_cl = &scan_mark_root_cl; | |
4377 scan_perm_cl = &scan_mark_perm_cl; | |
4378 scan_so_cl = &scan_mark_heap_rs_cl; | |
4379 } else { | |
4380 scan_root_cl = &only_scan_root_cl; | |
4381 scan_perm_cl = &only_scan_perm_cl; | |
4382 scan_so_cl = &only_scan_heap_rs_cl; | |
4383 } | |
4384 | |
4385 pss.start_strong_roots(); | |
4386 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4387 SharedHeap::SO_AllClasses, | |
4388 scan_root_cl, | |
4389 &only_scan_heap_rs_cl, | |
4390 scan_so_cl, | |
4391 scan_perm_cl, | |
4392 i); | |
4393 pss.end_strong_roots(); | |
4394 { | |
4395 double start = os::elapsedTime(); | |
4396 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4397 evac.do_void(); | |
4398 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4399 double term_ms = pss.term_time()*1000.0; | |
4400 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
4401 _g1h->g1_policy()->record_termination_time(i, term_ms); | |
4402 } | |
751 | 4403 if (G1UseSurvivorSpaces) { |
545 | 4404 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
4405 } | |
342 | 4406 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
4407 | |
4408 // Clean up any par-expanded rem sets. | |
4409 HeapRegionRemSet::par_cleanup(); | |
4410 | |
4411 MutexLocker x(stats_lock()); | |
4412 if (ParallelGCVerbose) { | |
4413 gclog_or_tty->print("Thread %d complete:\n", i); | |
4414 #if G1_DETAILED_STATS | |
4415 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", | |
4416 pss.pushes(), | |
4417 pss.pops(), | |
4418 pss.overflow_pushes(), | |
4419 pss.steals(), | |
4420 pss.steal_attempts()); | |
4421 #endif | |
4422 double elapsed = pss.elapsed(); | |
4423 double strong_roots = pss.strong_roots_time(); | |
4424 double term = pss.term_time(); | |
4425 gclog_or_tty->print(" Elapsed: %7.2f ms.\n" | |
4426 " Strong roots: %7.2f ms (%6.2f%%)\n" | |
4427 " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", | |
4428 elapsed * 1000.0, | |
4429 strong_roots * 1000.0, (strong_roots*100.0/elapsed), | |
4430 term * 1000.0, (term*100.0/elapsed), | |
4431 pss.term_attempts()); | |
4432 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); | |
4433 gclog_or_tty->print(" Waste: %8dK\n" | |
4434 " Alloc Buffer: %8dK\n" | |
4435 " Undo: %8dK\n", | |
4436 (total_waste * HeapWordSize) / K, | |
4437 (pss.alloc_buffer_waste() * HeapWordSize) / K, | |
4438 (pss.undo_waste() * HeapWordSize) / K); | |
4439 } | |
4440 | |
4441 assert(pss.refs_to_scan() == 0, "Task queue should be empty"); | |
4442 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); | |
4443 } | |
4444 }; | |
4445 | |
4446 // *** Common G1 Evacuation Stuff | |
4447 | |
4448 class G1CountClosure: public OopsInHeapRegionClosure { | |
4449 public: | |
4450 int n; | |
4451 G1CountClosure() : n(0) {} | |
4452 void do_oop(narrowOop* p) { | |
4453 guarantee(false, "NYI"); | |
4454 } | |
4455 void do_oop(oop* p) { | |
4456 oop obj = *p; | |
4457 assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj), | |
4458 "Rem set closure called on non-rem-set pointer."); | |
4459 n++; | |
4460 } | |
4461 }; | |
4462 | |
4463 static G1CountClosure count_closure; | |
4464 | |
4465 void | |
4466 G1CollectedHeap:: | |
4467 g1_process_strong_roots(bool collecting_perm_gen, | |
4468 SharedHeap::ScanningOption so, | |
4469 OopClosure* scan_non_heap_roots, | |
4470 OopsInHeapRegionClosure* scan_rs, | |
4471 OopsInHeapRegionClosure* scan_so, | |
4472 OopsInGenClosure* scan_perm, | |
4473 int worker_i) { | |
4474 // First scan the strong roots, including the perm gen. | |
4475 double ext_roots_start = os::elapsedTime(); | |
4476 double closure_app_time_sec = 0.0; | |
4477 | |
4478 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4479 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4480 buf_scan_perm.set_generation(perm_gen()); | |
4481 | |
4482 process_strong_roots(collecting_perm_gen, so, | |
4483 &buf_scan_non_heap_roots, | |
4484 &buf_scan_perm); | |
4485 // Finish up any enqueued closure apps. | |
4486 buf_scan_non_heap_roots.done(); | |
4487 buf_scan_perm.done(); | |
4488 double ext_roots_end = os::elapsedTime(); | |
4489 g1_policy()->reset_obj_copy_time(worker_i); | |
4490 double obj_copy_time_sec = | |
4491 buf_scan_non_heap_roots.closure_app_seconds() + | |
4492 buf_scan_perm.closure_app_seconds(); | |
4493 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4494 double ext_root_time_ms = | |
4495 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4496 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4497 | |
4498 // Scan strong roots in mark stack. | |
4499 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4500 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4501 } | |
4502 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4503 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4504 | |
4505 // XXX What should this be doing in the parallel case? | |
4506 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4507 if (scan_so != NULL) { | |
4508 scan_scan_only_set(scan_so, worker_i); | |
4509 } | |
4510 // Now scan the complement of the collection set. | |
4511 if (scan_rs != NULL) { | |
4512 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4513 } | |
4514 // Finish with the ref_processor roots. | |
4515 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
4516 ref_processor()->oops_do(scan_non_heap_roots); | |
4517 } | |
4518 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4519 _process_strong_tasks->all_tasks_completed(); | |
4520 } | |
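
The bookkeeping above separates two costs that share one wall-clock interval: the buffering closures record how long their buffered applications (object copying) took, and that time is subtracted from the interval to get the pure external-root-scan time. The same arithmetic with made-up numbers:

    #include <cstdio>

    int main() {
      double ext_roots_start = 0.000;  // os::elapsedTime() at entry (seconds)
      double ext_roots_end   = 0.050;  // 50 ms of wall time in root processing
      double closure_app_sec = 0.030;  // 30 ms of that was object copying

      double obj_copy_ms = closure_app_sec * 1000.0;                             // 30 ms
      double ext_root_ms = ((ext_roots_end - ext_roots_start) - closure_app_sec)
                           * 1000.0;                                             // 20 ms
      printf("obj copy %.1f ms, pure ext root scan %.1f ms\n",
             obj_copy_ms, ext_root_ms);
      return 0;
    }
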
4521 | |
4522 void | |
4523 G1CollectedHeap::scan_scan_only_region(HeapRegion* r, | |
4524 OopsInHeapRegionClosure* oc, | |
4525 int worker_i) { | |
4526 HeapWord* startAddr = r->bottom(); | |
4527 HeapWord* endAddr = r->used_region().end(); | |
4528 | |
4529 oc->set_region(r); | |
4530 | |
4531 HeapWord* p = r->bottom(); | |
4532 HeapWord* t = r->top(); | |
4533 guarantee( p == r->next_top_at_mark_start(), "invariant" ); | |
4534 while (p < t) { | |
4535 oop obj = oop(p); | |
4536 p += obj->oop_iterate(oc); | |
4537 } | |
4538 } | |
4539 | |
4540 void | |
4541 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc, | |
4542 int worker_i) { | |
4543 double start = os::elapsedTime(); | |
4544 | |
4545 BufferingOopsInHeapRegionClosure boc(oc); | |
4546 | |
4547 FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc); | |
4548 FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark()); | |
4549 | |
4550 OopsInHeapRegionClosure *foc; | |
4551 if (g1_policy()->should_initiate_conc_mark()) | |
4552 foc = &scan_and_mark; | |
4553 else | |
4554 foc = &scan_only; | |
4555 | |
4556 HeapRegion* hr; | |
4557 int n = 0; | |
4558 while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) { | |
4559 scan_scan_only_region(hr, foc, worker_i); | |
4560 ++n; | |
4561 } | |
4562 boc.done(); | |
4563 | |
4564 double closure_app_s = boc.closure_app_seconds(); | |
4565 g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0); | |
4566 double ms = (os::elapsedTime() - start - closure_app_s)*1000.0; | |
4567 g1_policy()->record_scan_only_time(worker_i, ms, n); | |
4568 } | |
4569 | |
4570 void | |
4571 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4572 OopClosure* non_root_closure) { | |
4573 SharedHeap::process_weak_roots(root_closure, non_root_closure); | |
4574 } | |
4575 | |
4576 | |
4577 class SaveMarksClosure: public HeapRegionClosure { | |
4578 public: | |
4579 bool doHeapRegion(HeapRegion* r) { | |
4580 r->save_marks(); | |
4581 return false; | |
4582 } | |
4583 }; | |
4584 | |
4585 void G1CollectedHeap::save_marks() { | |
4586 if (ParallelGCThreads == 0) { | |
4587 SaveMarksClosure sm; | |
4588 heap_region_iterate(&sm); | |
4589 } | |
4590 // We do this even in the parallel case | |
4591 perm_gen()->save_marks(); | |
4592 } | |
4593 | |
4594 void G1CollectedHeap::evacuate_collection_set() { | |
4595 set_evacuation_failed(false); | |
4596 | |
4597 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4598 concurrent_g1_refine()->set_use_cache(false); | |
4599 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); | |
4600 set_par_threads(n_workers); | |
4601 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4602 | |
4603 init_for_evac_failure(NULL); | |
4604 | |
4605 change_strong_roots_parity(); // In preparation for parallel strong roots. | |
4606 rem_set()->prepare_for_younger_refs_iterate(true); | |
616 | 4607 |
4608 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
342 | 4609 double start_par = os::elapsedTime(); |
4610 if (ParallelGCThreads > 0) { | |
4611 // The individual threads will set their evac-failure closures. | |
4612 workers()->run_task(&g1_par_task); | |
4613 } else { | |
4614 g1_par_task.work(0); | |
4615 } | |
4616 | |
4617 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4618 g1_policy()->record_par_time(par_time); | |
4619 set_par_threads(0); | |
4620 // Is this the right thing to do here? We don't save marks | |
4621 // on individual heap regions when we allocate from | |
4622 // them in parallel, so this seems like the correct place for this. | |
545 | 4623 retire_all_alloc_regions(); |
342 | 4624 { |
4625 G1IsAliveClosure is_alive(this); | |
4626 G1KeepAliveClosure keep_alive(this); | |
4627 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4628 } | |
4629 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); | |
616 | 4630 |
342 | 4631 concurrent_g1_refine()->set_use_cache(true); |
4632 | |
4633 finalize_for_evac_failure(); | |
4634 | |
4635 // Must do this before removing self-forwarding pointers, which clears | |
4636 // the per-region evac-failure flags. | |
4637 concurrent_mark()->complete_marking_in_collection_set(); | |
4638 | |
4639 if (evacuation_failed()) { | |
4640 remove_self_forwarding_pointers(); | |
4641 if (PrintGCDetails) { | |
4642 gclog_or_tty->print(" (evacuation failed)"); | |
4643 } else if (PrintGC) { | |
4644 gclog_or_tty->print("--"); | |
4645 } | |
4646 } | |
4647 | |
616 | 4648 if (G1DeferredRSUpdate) { |
4649 RedirtyLoggedCardTableEntryFastClosure redirty; |
4650 dirty_card_queue_set().set_closure(&redirty); |
4651 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
4652 JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set()); |
4653 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
4654 } |
4655 |
342 | 4656 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
4657 } | |
4658 | |
4659 void G1CollectedHeap::free_region(HeapRegion* hr) { | |
4660 size_t pre_used = 0; | |
4661 size_t cleared_h_regions = 0; | |
4662 size_t freed_regions = 0; | |
4663 UncleanRegionList local_list; | |
4664 | |
4665 HeapWord* start = hr->bottom(); | |
4666 HeapWord* end = hr->prev_top_at_mark_start(); | |
4667 size_t used_bytes = hr->used(); | |
4668 size_t live_bytes = hr->max_live_bytes(); | |
4669 if (used_bytes > 0) { | |
4670 guarantee( live_bytes <= used_bytes, "invariant" ); | |
4671 } else { | |
4672 guarantee( live_bytes == 0, "invariant" ); | |
4673 } | |
4674 | |
4675 size_t garbage_bytes = used_bytes - live_bytes; | |
4676 if (garbage_bytes > 0) | |
4677 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); | |
4678 | |
4679 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, | |
4680 &local_list); | |
4681 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
4682 &local_list); | |
4683 } | |
4684 | |
4685 void | |
4686 G1CollectedHeap::free_region_work(HeapRegion* hr, | |
4687 size_t& pre_used, | |
4688 size_t& cleared_h_regions, | |
4689 size_t& freed_regions, | |
4690 UncleanRegionList* list, | |
4691 bool par) { | |
4692 pre_used += hr->used(); | |
4693 if (hr->isHumongous()) { | |
4694 assert(hr->startsHumongous(), | |
4695 "Only the start of a humongous region should be freed."); | |
4696 int ind = _hrs->find(hr); | |
4697 assert(ind != -1, "Should have an index."); | |
4698 // Clear the start region. | |
4699 hr->hr_clear(par, true /*clear_space*/); | |
4700 list->insert_before_head(hr); | |
4701 cleared_h_regions++; | |
4702 freed_regions++; | |
4703 // Clear any continued regions. | |
4704 ind++; | |
4705 while ((size_t)ind < n_regions()) { | |
4706 HeapRegion* hrc = _hrs->at(ind); | |
4707 if (!hrc->continuesHumongous()) break; | |
4708 // Otherwise, does continue the H region. | |
4709 assert(hrc->humongous_start_region() == hr, "Huh?"); | |
4710 hrc->hr_clear(par, true /*clear_space*/); | |
4711 cleared_h_regions++; | |
4712 freed_regions++; | |
4713 list->insert_before_head(hrc); | |
4714 ind++; | |
4715 } | |
4716 } else { | |
4717 hr->hr_clear(par, true /*clear_space*/); | |
4718 list->insert_before_head(hr); | |
4719 freed_regions++; | |
4720 // If we're using clear2, this should not be enabled. | |
4721 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); | |
4722 } | |
4723 } | |
4724 | |
4725 void G1CollectedHeap::finish_free_region_work(size_t pre_used, | |
4726 size_t cleared_h_regions, | |
4727 size_t freed_regions, | |
4728 UncleanRegionList* list) { | |
4729 if (list != NULL && list->sz() > 0) { | |
4730 prepend_region_list_on_unclean_list(list); | |
4731 } | |
4732 // Acquire a lock, if we're parallel, to update possibly-shared | |
4733 // variables. | |
4734 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; | |
4735 { | |
4736 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
4737 _summary_bytes_used -= pre_used; | |
4738 _num_humongous_regions -= (int) cleared_h_regions; | |
4739 _free_regions += freed_regions; | |
4740 } | |
4741 } | |
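
The NULL-lock idiom above makes the serial path free: MutexLockerEx with a NULL mutex simply skips locking, so only parallel callers pay for synchronizing the shared counters. The same shape in portable C++ (names are illustrative):

    #include <mutex>

    std::mutex par_rare_event_lock;
    int n_par_threads = 0;  // pretend: set by the GC driver

    void finish_free_region_work_sketch(long pre_used, long* summary_bytes_used) {
      // NULL lock in the original; here, a unique_lock that may stay disengaged.
      std::unique_lock<std::mutex> x(par_rare_event_lock, std::defer_lock);
      if (n_par_threads > 0) x.lock();  // only parallel callers synchronize
      *summary_bytes_used -= pre_used;  // shared-variable update
    }                                   // unlocks (if locked) on scope exit
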
4742 | |
4743 | |
4744 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4745 while (list != NULL) { | |
4746 guarantee( list->is_young(), "invariant" ); | |
4747 | |
4748 HeapWord* bottom = list->bottom(); | |
4749 HeapWord* end = list->end(); | |
4750 MemRegion mr(bottom, end); | |
4751 ct_bs->dirty(mr); | |
4752 | |
4753 list = list->get_next_young_region(); | |
4754 } | |
4755 } | |
4756 | |
796 | 4757 |
4758 class G1ParCleanupCTTask : public AbstractGangTask { |
4759   CardTableModRefBS* _ct_bs; |
4760   G1CollectedHeap* _g1h; |
4761 public: |
4762   G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
4763                      G1CollectedHeap* g1h) : |
4764     AbstractGangTask("G1 Par Cleanup CT Task"), |
4765     _ct_bs(ct_bs), |
4766     _g1h(g1h) |
4767   { } |
4768 |
4769   void work(int i) { |
4770     HeapRegion* r; |
4771     while ((r = _g1h->pop_dirty_cards_region()) != NULL) { |
4772       clear_cards(r); |
4773     } |
4774   } |
4775   void clear_cards(HeapRegion* r) { |
4776     // Cards for Survivor and Scan-Only regions will be dirtied later. |
4777     if (!r->is_scan_only() && !r->is_survivor()) { |
4778       _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
4779     } |
4780   } |
4781 }; |
4782 |
4783 |
342 | 4784 void G1CollectedHeap::cleanUpCardTable() { |
4785 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
4786 double start = os::elapsedTime(); | |
4787 | |
796 | 4788 // Iterate over the dirty cards region list. |
4789 G1ParCleanupCTTask cleanup_task(ct_bs, this); |
4790 if (ParallelGCThreads > 0) { |
4791 set_par_threads(workers()->total_workers()); |
4792 workers()->run_task(&cleanup_task); |
4793 set_par_threads(0); |
4794 } else { |
4795 while (_dirty_cards_region_list) { |
4796 HeapRegion* r = _dirty_cards_region_list; |
4797 cleanup_task.clear_cards(r); |
4798 _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
4799 if (_dirty_cards_region_list == r) { |
4800 // The last region. |
4801 _dirty_cards_region_list = NULL; |
4802 } |
4803 r->set_next_dirty_cards_region(NULL); |
4804 } |
4805 } |
342 | 4806 // now, redirty the cards of the scan-only and survivor regions |
4807 // (it seemed faster to do it this way, instead of iterating over | |
796 | 4808 // all regions and then clearing / dirtying as appropriate) |
342 | 4809 dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region()); |
4810 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); | |
4811 | |
4812 double elapsed = os::elapsedTime() - start; | |
4813 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
4814 } | |
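
Editor's note: the task and driver above implement this changeset's fix for high serial card-table clearing time. Regions that produced dirty cards are queued on _dirty_cards_region_list during the pause; cleanUpCardTable() then hands whole regions to the worker gang, each worker popping and clearing until the list drains (or walks the list serially when ParallelGCThreads is 0). A minimal standalone sketch of that drain pattern, with hypothetical names rather than HotSpot types:

#include <atomic>
#include <cstddef>
#include <cstring>
#include <thread>
#include <vector>

struct Region {
  Region* next = nullptr;
  char cards[512];                      // stand-in for this region's card range
};

std::atomic<Region*> dirty_head{nullptr};

// Analogous to pop_dirty_cards_region(): detach one region, CAS-protected.
Region* pop_dirty_region() {
  Region* r = dirty_head.load();
  while (r != nullptr && !dirty_head.compare_exchange_weak(r, r->next)) {}
  return r;
}

void worker() {                         // analogous to G1ParCleanupCTTask::work
  while (Region* r = pop_dirty_region()) {
    std::memset(r->cards, 0, sizeof(r->cards));   // "clear the cards"
  }
}

int main() {
  std::vector<Region> regions(64);      // build the dirty-region list
  for (std::size_t i = 0; i + 1 < regions.size(); ++i) regions[i].next = &regions[i + 1];
  dirty_head = &regions[0];
  std::vector<std::thread> gang;
  for (int i = 0; i < 4; ++i) gang.emplace_back(worker);
  for (auto& t : gang) t.join();
}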
4815 | |
4816 | |
4817 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { | |
4818 if (g1_policy()->should_do_collection_pause(word_size)) { | |
4819 do_collection_pause(); | |
4820 } | |
4821 } | |
4822 | |
4823 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
4824 double young_time_ms = 0.0; | |
4825 double non_young_time_ms = 0.0; | |
4826 | |
4827 G1CollectorPolicy* policy = g1_policy(); | |
4828 | |
4829 double start_sec = os::elapsedTime(); | |
4830 bool non_young = true; | |
4831 | |
4832 HeapRegion* cur = cs_head; | |
4833 int age_bound = -1; | |
4834 size_t rs_lengths = 0; | |
4835 | |
4836 while (cur != NULL) { | |
4837 if (non_young) { | |
4838 if (cur->is_young()) { | |
4839 double end_sec = os::elapsedTime(); | |
4840 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4841 non_young_time_ms += elapsed_ms; | |
4842 | |
4843 start_sec = os::elapsedTime(); | |
4844 non_young = false; | |
4845 } | |
4846 } else { | |
4847 if (!cur->is_on_free_list()) { | |
4848 double end_sec = os::elapsedTime(); | |
4849 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4850 young_time_ms += elapsed_ms; | |
4851 | |
4852 start_sec = os::elapsedTime(); | |
4853 non_young = true; | |
4854 } | |
4855 } | |
4856 | |
4857 rs_lengths += cur->rem_set()->occupied(); | |
4858 | |
4859 HeapRegion* next = cur->next_in_collection_set(); | |
4860 assert(cur->in_collection_set(), "bad CS"); | |
4861 cur->set_next_in_collection_set(NULL); | |
4862 cur->set_in_collection_set(false); | |
4863 | |
4864 if (cur->is_young()) { | |
4865 int index = cur->young_index_in_cset(); | |
4866 guarantee( index != -1, "invariant" ); | |
4867 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
4868 size_t words_survived = _surviving_young_words[index]; | |
4869 cur->record_surv_words_in_group(words_survived); | |
4870 } else { | |
4871 int index = cur->young_index_in_cset(); | |
4872 guarantee( index == -1, "invariant" ); | |
4873 } | |
4874 | |
4875 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
4876 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
4877 "invariant" ); | |
4878 | |
4879 if (!cur->evacuation_failed()) { | |
4880 // The region is non-empty: collection-set regions are never empty. |
4881 assert(!cur->is_empty(), | |
4882 "Should not have empty regions in a CS."); | |
4883 free_region(cur); | |
4884 } else { | |
4885 guarantee( !cur->is_scan_only(), "should not be scan only" ); | |
4886 cur->uninstall_surv_rate_group(); | |
4887 if (cur->is_young()) | |
4888 cur->set_young_index_in_cset(-1); | |
4889 cur->set_not_young(); | |
4890 cur->set_evacuation_failed(false); | |
4891 } | |
4892 cur = next; | |
4893 } | |
4894 | |
4895 policy->record_max_rs_lengths(rs_lengths); | |
4896 policy->cset_regions_freed(); | |
4897 | |
4898 double end_sec = os::elapsedTime(); | |
4899 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4900 if (non_young) | |
4901 non_young_time_ms += elapsed_ms; | |
4902 else | |
4903 young_time_ms += elapsed_ms; | |
4904 | |
4905 policy->record_young_free_cset_time_ms(young_time_ms); | |
4906 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
4907 } | |
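
Editor's note: free_collection_set() above splits its wall-clock cost into "young" and "non-young" buckets by banking the elapsed time and restarting the clock whenever the region stream switches kind. A toy model of that accounting pattern (simplified: it keys purely on is-young, whereas the code above uses the free-list check for the switch back):

#include <chrono>
#include <cstdio>
#include <vector>

int main() {
  using clock = std::chrono::steady_clock;
  auto ms_since = [](clock::time_point t) {
    return std::chrono::duration<double, std::milli>(clock::now() - t).count();
  };
  std::vector<bool> is_young = {false, false, true, true, true};  // assumed stream
  double young_ms = 0.0, non_young_ms = 0.0;
  bool in_non_young = true;             // the walk starts in non-young mode
  auto start = clock::now();
  for (bool young : is_young) {
    if (in_non_young == young) {        // kind switch: bank the segment's time
      (in_non_young ? non_young_ms : young_ms) += ms_since(start);
      start = clock::now();
      in_non_young = !young;
    }
    // ... per-region freeing work would happen here ...
  }
  (in_non_young ? non_young_ms : young_ms) += ms_since(start);    // final segment
  std::printf("young %.3f ms, non-young %.3f ms\n", young_ms, non_young_ms);
}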
4908 | |
4909 HeapRegion* | |
4910 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { | |
4911 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4912 HeapRegion* res = pop_unclean_region_list_locked(); | |
4913 if (res != NULL) { | |
4914 assert(!res->continuesHumongous() && | |
4915 res->zero_fill_state() != HeapRegion::Allocated, | |
4916 "Only free regions on unclean list."); | |
4917 if (zero_filled) { | |
4918 res->ensure_zero_filled_locked(); | |
4919 res->set_zero_fill_allocated(); | |
4920 } | |
4921 } | |
4922 return res; | |
4923 } | |
4924 | |
4925 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { | |
4926 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4927 return alloc_region_from_unclean_list_locked(zero_filled); | |
4928 } | |
4929 | |
4930 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { | |
4931 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4932 put_region_on_unclean_list_locked(r); | |
4933 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4934 } | |
4935 | |
4936 void G1CollectedHeap::set_unclean_regions_coming(bool b) { | |
4937 MutexLockerEx x(Cleanup_mon); | |
4938 set_unclean_regions_coming_locked(b); | |
4939 } | |
4940 | |
4941 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { | |
4942 assert(Cleanup_mon->owned_by_self(), "Precondition"); | |
4943 _unclean_regions_coming = b; | |
4944 // Wake up mutator threads that might be waiting for completeCleanup to | |
4945 // finish. | |
4946 if (!b) Cleanup_mon->notify_all(); | |
4947 } | |
4948 | |
4949 void G1CollectedHeap::wait_for_cleanup_complete() { | |
4950 MutexLockerEx x(Cleanup_mon); | |
4951 wait_for_cleanup_complete_locked(); | |
4952 } | |
4953 | |
4954 void G1CollectedHeap::wait_for_cleanup_complete_locked() { | |
4955 assert(Cleanup_mon->owned_by_self(), "precondition"); | |
4956 while (_unclean_regions_coming) { | |
4957 Cleanup_mon->wait(); | |
4958 } | |
4959 } | |
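
Editor's note: wait_for_cleanup_complete_locked() is the standard monitor wait loop, paired with the notify_all in set_unclean_regions_coming_locked() when the flag drops. The same shape in portable C++ (a sketch of the idiom, not the VM's Monitor API):

#include <condition_variable>
#include <mutex>

std::mutex cleanup_mon;                  // plays the role of Cleanup_mon
std::condition_variable cleanup_cv;
bool unclean_regions_coming = false;

void wait_for_cleanup_complete() {
  std::unique_lock<std::mutex> lk(cleanup_mon);
  cleanup_cv.wait(lk, [] { return !unclean_regions_coming; });  // loop until clear
}

void set_unclean_regions_coming(bool b) {
  { std::lock_guard<std::mutex> lk(cleanup_mon); unclean_regions_coming = b; }
  if (!b) cleanup_cv.notify_all();       // cf. Cleanup_mon->notify_all()
}

int main() {
  set_unclean_regions_coming(false);
  wait_for_cleanup_complete();           // returns immediately: flag is clear
}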
4960 | |
4961 void | |
4962 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { | |
4963 assert(ZF_mon->owned_by_self(), "precondition."); | |
4964 _unclean_region_list.insert_before_head(r); | |
4965 } | |
4966 | |
4967 void | |
4968 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { | |
4969 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4970 prepend_region_list_on_unclean_list_locked(list); | |
4971 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4972 } | |
4973 | |
4974 void | |
4975 G1CollectedHeap:: | |
4976 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { | |
4977 assert(ZF_mon->owned_by_self(), "precondition."); | |
4978 _unclean_region_list.prepend_list(list); | |
4979 } | |
4980 | |
4981 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { | |
4982 assert(ZF_mon->owned_by_self(), "precondition."); | |
4983 HeapRegion* res = _unclean_region_list.pop(); | |
4984 if (res != NULL) { | |
4985 // Inform ZF thread that there's a new unclean head. | |
4986 if (_unclean_region_list.hd() != NULL && should_zf()) | |
4987 ZF_mon->notify_all(); | |
4988 } | |
4989 return res; | |
4990 } | |
4991 | |
4992 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { | |
4993 assert(ZF_mon->owned_by_self(), "precondition."); | |
4994 return _unclean_region_list.hd(); | |
4995 } | |
4996 | |
4997 | |
4998 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { | |
4999 assert(ZF_mon->owned_by_self(), "Precondition"); | |
5000 HeapRegion* r = peek_unclean_region_list_locked(); | |
5001 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { | |
5002 // Result of below must be equal to "r", since we hold the lock. | |
5003 (void)pop_unclean_region_list_locked(); | |
5004 put_free_region_on_list_locked(r); | |
5005 return true; | |
5006 } else { | |
5007 return false; | |
5008 } | |
5009 } | |
5010 | |
5011 bool G1CollectedHeap::move_cleaned_region_to_free_list() { | |
5012 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5013 return move_cleaned_region_to_free_list_locked(); | |
5014 } | |
5015 | |
5016 | |
5017 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { | |
5018 assert(ZF_mon->owned_by_self(), "precondition."); | |
5019 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5020 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, | |
5021 "Regions on free list must be zero filled"); | |
5022 assert(!r->isHumongous(), "Must not be humongous."); | |
5023 assert(r->is_empty(), "Better be empty"); | |
5024 assert(!r->is_on_free_list(), | |
5025 "Better not already be on free list"); | |
5026 assert(!r->is_on_unclean_list(), | |
5027 "Better not already be on unclean list"); | |
5028 r->set_on_free_list(true); | |
5029 r->set_next_on_free_list(_free_region_list); | |
5030 _free_region_list = r; | |
5031 _free_region_list_size++; | |
5032 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5033 } | |
5034 | |
5035 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { | |
5036 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5037 put_free_region_on_list_locked(r); | |
5038 } | |
5039 | |
5040 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { | |
5041 assert(ZF_mon->owned_by_self(), "precondition."); | |
5042 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5043 HeapRegion* res = _free_region_list; | |
5044 if (res != NULL) { | |
5045 _free_region_list = res->next_from_free_list(); | |
5046 _free_region_list_size--; | |
5047 res->set_on_free_list(false); | |
5048 res->set_next_on_free_list(NULL); | |
5049 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5050 } | |
5051 return res; | |
5052 } | |
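
Editor's note: the free list above is intrusive (threaded through the regions themselves), and _free_region_list_size is kept in lock-step with the actual list length, which is what the recurring "Inv" asserts check. A standalone sketch of that push/pop discipline:

#include <cassert>
#include <cstddef>

struct Region {
  Region* next_free = nullptr;
  bool on_free_list = false;
};

Region* free_list = nullptr;
std::size_t free_list_size = 0;          // must always equal the walked length

void push_free(Region* r) {              // cf. put_free_region_on_list_locked
  assert(!r->on_free_list && "must not already be on the free list");
  r->on_free_list = true;
  r->next_free = free_list;
  free_list = r;
  ++free_list_size;
}

Region* pop_free() {                     // cf. pop_free_region_list_locked
  Region* r = free_list;
  if (r != nullptr) {
    free_list = r->next_free;
    --free_list_size;
    r->on_free_list = false;
    r->next_free = nullptr;
  }
  return r;
}

int main() {
  Region a, b;
  push_free(&a);
  push_free(&b);
  assert(pop_free() == &b && pop_free() == &a && free_list_size == 0);
}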
5053 | |
5054 | |
5055 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { | |
5056 // By self, or on behalf of self. | |
5057 assert(Heap_lock->is_locked(), "Precondition"); | |
5058 HeapRegion* res = NULL; | |
5059 bool first = true; | |
5060 while (res == NULL) { | |
5061 if (zero_filled || !first) { | |
5062 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5063 res = pop_free_region_list_locked(); | |
5064 if (res != NULL) { | |
5065 assert(!res->zero_fill_is_allocated(), | |
5066 "No allocated regions on free list."); | |
5067 res->set_zero_fill_allocated(); | |
5068 } else if (!first) { | |
5069 break; // We tried both, time to return NULL. | |
5070 } | |
5071 } | |
5072 | |
5073 if (res == NULL) { | |
5074 res = alloc_region_from_unclean_list(zero_filled); | |
5075 } | |
5076 assert(res == NULL || | |
5077 !zero_filled || | |
5078 res->zero_fill_is_allocated(), | |
5079 "We must have allocated the region we're returning"); | |
5080 first = false; | |
5081 } | |
5082 return res; | |
5083 } | |
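
Editor's note: alloc_free_region_from_lists() makes at most two passes. A caller needing a zero-filled region tries the (already zeroed) free list first and falls back to zero-filling an unclean region; a caller that does not tries the unclean list first, conserving pre-zeroed regions. The control flow, sketched with hypothetical pop helpers (stubbed empty so the loop's termination is visible):

#include <cassert>

struct Region { bool zeroed = false; };

Region* pop_free()    { return nullptr; }   // stub: pre-zeroed regions
Region* pop_unclean() { return nullptr; }   // stub: not-yet-zeroed regions

Region* alloc_region(bool need_zero_filled) {
  Region* res = nullptr;
  bool first = true;
  while (res == nullptr) {
    if (need_zero_filled || !first) {
      res = pop_free();                     // try the pre-zeroed supply
      if (res == nullptr && !first) break;  // both lists tried: give up
    }
    if (res == nullptr) {
      res = pop_unclean();                  // fall back to an unclean region
      if (res != nullptr && need_zero_filled) res->zeroed = true;  // zero on demand
    }
    first = false;
  }
  return res;
}

int main() {
  assert(alloc_region(true)  == nullptr);   // empty lists: NULL, no infinite loop
  assert(alloc_region(false) == nullptr);
}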
5084 | |
5085 void G1CollectedHeap::remove_allocated_regions_from_lists() { | |
5086 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5087 { | |
5088 HeapRegion* prev = NULL; | |
5089 HeapRegion* cur = _unclean_region_list.hd(); | |
5090 while (cur != NULL) { | |
5091 HeapRegion* next = cur->next_from_unclean_list(); | |
5092 if (cur->zero_fill_is_allocated()) { | |
5093 // Remove from the list. | |
5094 if (prev == NULL) { | |
5095 (void)_unclean_region_list.pop(); | |
5096 } else { | |
5097 _unclean_region_list.delete_after(prev); | |
5098 } | |
5099 cur->set_on_unclean_list(false); | |
5100 cur->set_next_on_unclean_list(NULL); | |
5101 } else { | |
5102 prev = cur; | |
5103 } | |
5104 cur = next; | |
5105 } | |
5106 assert(_unclean_region_list.sz() == unclean_region_list_length(), | |
5107 "Inv"); | |
5108 } | |
5109 | |
5110 { | |
5111 HeapRegion* prev = NULL; | |
5112 HeapRegion* cur = _free_region_list; | |
5113 while (cur != NULL) { | |
5114 HeapRegion* next = cur->next_from_free_list(); | |
5115 if (cur->zero_fill_is_allocated()) { | |
5116 // Remove from the list. | |
5117 if (prev == NULL) { | |
5118 _free_region_list = cur->next_from_free_list(); | |
5119 } else { | |
5120 prev->set_next_on_free_list(cur->next_from_free_list()); | |
5121 } | |
5122 cur->set_on_free_list(false); | |
5123 cur->set_next_on_free_list(NULL); | |
5124 _free_region_list_size--; | |
5125 } else { | |
5126 prev = cur; | |
5127 } | |
5128 cur = next; | |
5129 } | |
5130 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
5131 } | |
5132 } | |
5133 | |
5134 bool G1CollectedHeap::verify_region_lists() { | |
5135 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5136 return verify_region_lists_locked(); | |
5137 } | |
5138 | |
5139 bool G1CollectedHeap::verify_region_lists_locked() { | |
5140 HeapRegion* unclean = _unclean_region_list.hd(); | |
5141 while (unclean != NULL) { | |
5142 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); | |
5143 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); | |
5144 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, | |
5145 "Everything else is possible."); | |
5146 unclean = unclean->next_from_unclean_list(); | |
5147 } | |
5148 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); | |
5149 | |
5150 HeapRegion* free_r = _free_region_list; | |
5151 while (free_r != NULL) { | |
5152 assert(free_r->is_on_free_list(), "Well, it is!"); | |
5153 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); | |
5154 switch (free_r->zero_fill_state()) { | |
5155 case HeapRegion::NotZeroFilled: | |
5156 case HeapRegion::ZeroFilling: | |
5157 guarantee(false, "Should not be on free list."); | |
5158 break; | |
5159 default: | |
5160 // Everything else is possible. | |
5161 break; | |
5162 } | |
5163 free_r = free_r->next_from_free_list(); | |
5164 } | |
5165 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); | |
5166 // If we didn't do an assertion... | |
5167 return true; | |
5168 } | |
5169 | |
5170 size_t G1CollectedHeap::free_region_list_length() { | |
5171 assert(ZF_mon->owned_by_self(), "precondition."); | |
5172 size_t len = 0; | |
5173 HeapRegion* cur = _free_region_list; | |
5174 while (cur != NULL) { | |
5175 len++; | |
5176 cur = cur->next_from_free_list(); | |
5177 } | |
5178 return len; | |
5179 } | |
5180 | |
5181 size_t G1CollectedHeap::unclean_region_list_length() { | |
5182 assert(ZF_mon->owned_by_self(), "precondition."); | |
5183 return _unclean_region_list.length(); | |
5184 } | |
5185 | |
5186 size_t G1CollectedHeap::n_regions() { | |
5187 return _hrs->length(); | |
5188 } | |
5189 | |
5190 size_t G1CollectedHeap::max_regions() { | |
5191 return | |
5192 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / | |
5193 HeapRegion::GrainBytes; | |
5194 } | |
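
Editor's note: max_regions() is just the reserved heap size rounded up to a whole number of regions, i.e. a ceiling division phrased via align_size_up. A worked example (1 MB grain assumed purely for illustration):

#include <cassert>
#include <cstddef>

constexpr std::size_t kGrainBytes = std::size_t(1) << 20;   // assumed 1 MB regions

constexpr std::size_t align_size_up(std::size_t v, std::size_t a) {
  return (v + a - 1) / a * a;            // round v up to a multiple of a
}

int main() {
  // 2.5 MB of reserved space rounds up to 3 regions.
  assert(align_size_up(2560u * 1024, kGrainBytes) / kGrainBytes == 3);
}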
5195 | |
5196 size_t G1CollectedHeap::free_regions() { | |
5197 /* Possibly-expensive assert. | |
5198 assert(_free_regions == count_free_regions(), | |
5199 "_free_regions is off."); | |
5200 */ | |
5201 return _free_regions; | |
5202 } | |
5203 | |
5204 bool G1CollectedHeap::should_zf() { | |
5205 return _free_region_list_size < (size_t) G1ConcZFMaxRegions; | |
5206 } | |
5207 | |
5208 class RegionCounter: public HeapRegionClosure { | |
5209 size_t _n; | |
5210 public: | |
5211 RegionCounter() : _n(0) {} | |
5212 bool doHeapRegion(HeapRegion* r) { | |
677 | 5213 if (r->is_empty()) { |
342 | 5214 assert(!r->isHumongous(), "H regions should not be empty."); |
5215 _n++; | |
5216 } | |
5217 return false; | |
5218 } | |
5219 int res() { return (int) _n; } | |
5220 }; | |
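
Editor's note: RegionCounter is one of many HeapRegionClosure visitors in this file; heap_region_iterate() applies doHeapRegion() to every region, and returning false means "keep going". A self-contained sketch of the idiom:

#include <cstddef>
#include <cstdio>
#include <vector>

struct Region { bool empty; };

struct RegionClosure {                    // cf. HeapRegionClosure
  virtual bool do_region(Region* r) = 0;  // return true to abort the walk
  virtual ~RegionClosure() {}
};

void region_iterate(std::vector<Region>& heap, RegionClosure* cl) {
  for (Region& r : heap)
    if (cl->do_region(&r)) return;        // closure requested early exit
}

struct EmptyCounter : RegionClosure {     // cf. RegionCounter
  std::size_t n = 0;
  bool do_region(Region* r) override { if (r->empty) ++n; return false; }
};

int main() {
  std::vector<Region> heap = {{true}, {false}, {true}};
  EmptyCounter rc;
  region_iterate(heap, &rc);
  std::printf("%zu empty regions\n", rc.n);
}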
5221 | |
5222 size_t G1CollectedHeap::count_free_regions() { | |
5223 RegionCounter rc; | |
5224 heap_region_iterate(&rc); | |
5225 size_t n = rc.res(); | |
5226 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) | |
5227 n--; | |
5228 return n; | |
5229 } | |
5230 | |
5231 size_t G1CollectedHeap::count_free_regions_list() { | |
5232 size_t n = 0; | |
5233 size_t o = 0; | |
5234 ZF_mon->lock_without_safepoint_check(); | |
5235 HeapRegion* cur = _free_region_list; | |
5236 while (cur != NULL) { | |
5237 cur = cur->next_from_free_list(); | |
5238 n++; | |
5239 } | |
5240 size_t m = unclean_region_list_length(); | |
5241 ZF_mon->unlock(); | |
5242 return n + m; | |
5243 } | |
5244 | |
5245 bool G1CollectedHeap::should_set_young_locked() { | |
5246 assert(heap_lock_held_for_gc(), | |
5247 "the heap lock should already be held by or for this thread"); | |
5248 return (g1_policy()->in_young_gc_mode() && | |
5249 g1_policy()->should_add_next_region_to_young_list()); | |
5250 } | |
5251 | |
5252 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
5253 assert(heap_lock_held_for_gc(), | |
5254 "the heap lock should already be held by or for this thread"); | |
5255 _young_list->push_region(hr); | |
5256 g1_policy()->set_region_short_lived(hr); | |
5257 } | |
5258 | |
5259 class NoYoungRegionsClosure: public HeapRegionClosure { | |
5260 private: | |
5261 bool _success; | |
5262 public: | |
5263 NoYoungRegionsClosure() : _success(true) { } | |
5264 bool doHeapRegion(HeapRegion* r) { | |
5265 if (r->is_young()) { | |
5266 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
5267 r->bottom(), r->end()); | |
5268 _success = false; | |
5269 } | |
5270 return false; | |
5271 } | |
5272 bool success() { return _success; } | |
5273 }; | |
5274 | |
5275 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list, | |
5276 bool check_sample) { | |
5277 bool ret = true; | |
5278 | |
5279 ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample); | |
5280 if (!ignore_scan_only_list) { | |
5281 NoYoungRegionsClosure closure; | |
5282 heap_region_iterate(&closure); | |
5283 ret = ret && closure.success(); | |
5284 } | |
5285 | |
5286 return ret; | |
5287 } | |
5288 | |
5289 void G1CollectedHeap::empty_young_list() { | |
5290 assert(heap_lock_held_for_gc(), | |
5291 "the heap lock should already be held by or for this thread"); | |
5292 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
5293 | |
5294 _young_list->empty_list(); | |
5295 } | |
5296 | |
5297 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
5298 bool no_allocs = true; | |
5299 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
5300 HeapRegion* r = _gc_alloc_regions[ap]; | |
5301 no_allocs = r == NULL || r->saved_mark_at_top(); | |
5302 } | |
5303 return no_allocs; | |
5304 } | |
5305 | |
545 | 5306 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 5307 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
5308 HeapRegion* r = _gc_alloc_regions[ap]; | |
5309 if (r != NULL) { | |
5310 // Check for aliases. | |
5311 bool has_processed_alias = false; | |
5312 for (int i = 0; i < ap; ++i) { | |
5313 if (_gc_alloc_regions[i] == r) { | |
5314 has_processed_alias = true; | |
5315 break; | |
5316 } | |
5317 } | |
5318 if (!has_processed_alias) { | |
545 | 5319 retire_alloc_region(r, false /* par */); |
342 | 5320 } |
5321 } | |
5322 } | |
5323 } | |
5324 | |
5325 | |
5326 // Done at the start of full GC. | |
5327 void G1CollectedHeap::tear_down_region_lists() { | |
5328 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5329 while (pop_unclean_region_list_locked() != NULL) ; | |
5330 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, | |
5331 "Postconditions of loop.") | |
5332 while (pop_free_region_list_locked() != NULL) ; | |
5333 assert(_free_region_list == NULL, "Postcondition of loop."); | |
5334 if (_free_region_list_size != 0) { | |
5335 gclog_or_tty->print_cr("Size is " SIZE_FORMAT ".", _free_region_list_size); |
5336 print(); | |
5337 } | |
5338 assert(_free_region_list_size == 0, "Postconditions of loop."); | |
5339 } | |
5340 | |
5341 | |
5342 class RegionResetter: public HeapRegionClosure { | |
5343 G1CollectedHeap* _g1; | |
5344 int _n; | |
5345 public: | |
5346 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5347 bool doHeapRegion(HeapRegion* r) { | |
5348 if (r->continuesHumongous()) return false; | |
5349 if (r->top() > r->bottom()) { | |
5350 if (r->top() < r->end()) { | |
5351 Copy::fill_to_words(r->top(), | |
5352 pointer_delta(r->end(), r->top())); | |
5353 } | |
5354 r->set_zero_fill_allocated(); | |
5355 } else { | |
5356 assert(r->is_empty(), "tautology"); | |
677 | 5357 _n++; |
5358 switch (r->zero_fill_state()) { | |
342 | 5359 case HeapRegion::NotZeroFilled: |
5360 case HeapRegion::ZeroFilling: | |
5361 _g1->put_region_on_unclean_list_locked(r); | |
5362 break; | |
5363 case HeapRegion::Allocated: | |
5364 r->set_zero_fill_complete(); | |
5365 // no break; go on to put on free list. | |
5366 case HeapRegion::ZeroFilled: | |
5367 _g1->put_free_region_on_list_locked(r); | |
5368 break; | |
5369 } | |
5370 } | |
5371 return false; | |
5372 } | |
5373 | |
5374 int getFreeRegionCount() {return _n;} | |
5375 }; | |
5376 | |
5377 // Done at the end of full GC. | |
5378 void G1CollectedHeap::rebuild_region_lists() { | |
5379 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5380 // This needs to go at the end of the full GC. | |
5381 RegionResetter rs; | |
5382 heap_region_iterate(&rs); | |
5383 _free_regions = rs.getFreeRegionCount(); | |
5384 // Tell the ZF thread it may have work to do. | |
5385 if (should_zf()) ZF_mon->notify_all(); | |
5386 } | |
5387 | |
5388 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { | |
5389 G1CollectedHeap* _g1; | |
5390 int _n; | |
5391 public: | |
5392 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5393 bool doHeapRegion(HeapRegion* r) { | |
5394 if (r->continuesHumongous()) return false; | |
5395 if (r->top() > r->bottom()) { | |
5396 // There are assertions in "set_zero_fill_needed()" below that | |
5397 // require top() == bottom(), so this is technically illegal. | |
5398 // We'll skirt the law here, by making that true temporarily. | |
5399 DEBUG_ONLY(HeapWord* save_top = r->top(); | |
5400 r->set_top(r->bottom())); | |
5401 r->set_zero_fill_needed(); | |
5402 DEBUG_ONLY(r->set_top(save_top)); | |
5403 } | |
5404 return false; | |
5405 } | |
5406 }; | |
5407 | |
5408 // Done at the start of full GC. | |
5409 void G1CollectedHeap::set_used_regions_to_need_zero_fill() { | |
5410 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5411 // This needs to go at the end of the full GC. | |
5412 UsedRegionsNeedZeroFillSetter rs; | |
5413 heap_region_iterate(&rs); | |
5414 } | |
5415 | |
5416 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
5417 _refine_cte_cl->set_concurrent(concurrent); | |
5418 } | |
5419 | |
5420 #ifndef PRODUCT | |
5421 | |
5422 class PrintHeapRegionClosure: public HeapRegionClosure { | |
5423 public: | |
5424 bool doHeapRegion(HeapRegion *r) { | |
5425 gclog_or_tty->print("Region: "PTR_FORMAT":", r); | |
5426 if (r != NULL) { | |
5427 if (r->is_on_free_list()) | |
5428 gclog_or_tty->print("Free "); | |
5429 if (r->is_young()) | |
5430 gclog_or_tty->print("Young "); | |
5431 if (r->isHumongous()) | |
5432 gclog_or_tty->print("Is Humongous "); | |
5433 r->print(); | |
5434 } | |
5435 return false; | |
5436 } | |
5437 }; | |
5438 | |
5439 class SortHeapRegionClosure : public HeapRegionClosure { | |
5440 size_t young_regions, free_regions, unclean_regions; |
5441 size_t hum_regions, count; | |
5442 size_t unaccounted, cur_unclean, cur_alloc; | |
5443 size_t total_free; | |
5444 HeapRegion* cur; | |
5445 public: | |
5446 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), | |
5447 free_regions(0), unclean_regions(0), | |
5448 hum_regions(0), | |
5449 count(0), unaccounted(0), | |
5450 cur_alloc(0), total_free(0) | |
5451 {} | |
5452 bool doHeapRegion(HeapRegion *r) { | |
5453 count++; | |
5454 if (r->is_on_free_list()) free_regions++; | |
5455 else if (r->is_on_unclean_list()) unclean_regions++; | |
5456 else if (r->isHumongous()) hum_regions++; | |
5457 else if (r->is_young()) young_regions++; | |
5458 else if (r == cur) cur_alloc++; | |
5459 else unaccounted++; | |
5460 return false; | |
5461 } | |
5462 void print() { | |
5463 total_free = free_regions + unclean_regions; | |
5464 gclog_or_tty->print(SIZE_FORMAT " regions\n", count); |
5465 gclog_or_tty->print(SIZE_FORMAT " free: free_list = " SIZE_FORMAT " unclean = " SIZE_FORMAT "\n", |
5466 total_free, free_regions, unclean_regions); |
5467 gclog_or_tty->print(SIZE_FORMAT " humongous " SIZE_FORMAT " young\n", |
5468 hum_regions, young_regions); |
5469 gclog_or_tty->print(SIZE_FORMAT " cur_alloc\n", cur_alloc); |
5470 gclog_or_tty->print("UHOH unaccounted = " SIZE_FORMAT "\n", unaccounted); |
5471 } | |
5472 }; | |
5473 | |
5474 void G1CollectedHeap::print_region_counts() { | |
5475 SortHeapRegionClosure sc(_cur_alloc_region); | |
5476 PrintHeapRegionClosure cl; | |
5477 heap_region_iterate(&cl); | |
5478 heap_region_iterate(&sc); | |
5479 sc.print(); | |
5480 print_region_accounting_info(); | |
5481 } |
5482 | |
5483 bool G1CollectedHeap::regions_accounted_for() { | |
5484 // TODO: regions accounting for young/survivor/tenured | |
5485 return true; | |
5486 } | |
5487 | |
5488 bool G1CollectedHeap::print_region_accounting_info() { | |
5489 gclog_or_tty->print_cr("Free regions: " SIZE_FORMAT " (count: " SIZE_FORMAT " count list " SIZE_FORMAT ") (clean: " SIZE_FORMAT " unclean: " SIZE_FORMAT ").", |
5490 free_regions(), | |
5491 count_free_regions(), count_free_regions_list(), | |
5492 _free_region_list_size, _unclean_region_list.sz()); | |
5493 gclog_or_tty->print_cr("cur_alloc: %d.", | |
5494 (_cur_alloc_region == NULL ? 0 : 1)); | |
5495 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); | |
5496 | |
5497 // TODO: check regions accounting for young/survivor/tenured | |
5498 return true; | |
5499 } | |
5500 | |
5501 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5502 HeapRegion* hr = heap_region_containing(p); | |
5503 if (hr == NULL) { | |
5504 return is_in_permanent(p); | |
5505 } else { | |
5506 return hr->is_in(p); | |
5507 } | |
5508 } | |
5509 #endif // PRODUCT | |
5510 | |
5511 void G1CollectedHeap::g1_unimplemented() { | |
5512 // Unimplemented(); | |
5513 } | |
5514 | |
5515 | |
5516 // Local Variables: *** | |
5517 // c-indentation-style: gnu *** | |
5518 // End: *** |