annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 889:15c5903cf9e1
6865703: G1: Parallelize hot card cache cleanup
Summary: Have the GC worker threads clear the hot card cache in parallel by having each worker thread claim a chunk of the card cache and process the cards in that chunk. The size of the chunks that each thread will claim is determined at VM initialization from the size of the card cache and the number of worker threads.
Reviewed-by: jmasa, tonyp
author | johnc |
date | Mon, 03 Aug 2009 12:59:30 -0700 |
parents | 83b687ce3090 |
children | 6cb8e9df7174 |
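
Background for the annotated listing below: this revision has each GC worker thread clear the hot card cache in parallel by repeatedly claiming a chunk of the cache (advancing a shared claim index with a compare-and-swap) and processing the cards in that chunk; the chunk size is fixed at VM initialization from the cache size and the worker count. The following standalone C++ sketch illustrates that claiming scheme only; the names (HotCardCacheSketch, par_clean_up, process) are illustrative assumptions, not the actual fields or methods of ConcurrentG1Refine.

    // Illustrative sketch of parallel chunk claiming over a card cache.
    #include <atomic>
    #include <cstddef>

    struct HotCardCacheSketch {
      void**              cards;       // cached "hot" card pointers
      size_t              length;      // number of valid entries
      size_t              chunk_size;  // fixed at init from length and worker count
      std::atomic<size_t> claimed;     // index of the next unclaimed card

      // Called by each worker thread; ProcessCard refines a single card.
      template <typename ProcessCard>
      void par_clean_up(ProcessCard process) {
        size_t start;
        while ((start = claimed.load()) < length) {
          size_t end = start + chunk_size;
          // Claim [start, end); if the CAS fails, another worker won the
          // race and we retry from the updated index.
          if (claimed.compare_exchange_strong(start, end)) {
            size_t limit = end < length ? end : length;
            for (size_t i = start; i < limit; ++i) {
              process(cards[i]);
            }
          }
        }
      }
    };

Each worker loops until the claim index passes the end of the cache, so the cards are partitioned across the threads without any locking.
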
rev | line source |
---|---|
342 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_g1CollectedHeap.cpp.incl" | |
27 | |
28 // turn it on so that the contents of the young list (scan-only / | |
29 // to-be-collected) are printed at "strategic" points before / during | |
30 // / after the collection --- this is useful for debugging | |
31 #define SCAN_ONLY_VERBOSE 0 | |
32 // CURRENT STATUS | |
33 // This file is under construction. Search for "FIXME". | |
34 | |
35 // INVARIANTS/NOTES | |
36 // | |
37 // All allocation activity covered by the G1CollectedHeap interface is | |
38 // serialized by acquiring the HeapLock. This happens in | |
39 // mem_allocate_work, which all such allocation functions call. | |
40 // (Note that this does not apply to TLAB allocation, which is not part | |
41 // of this interface: it is done by clients of this interface.) | |
42 | |
43 // Local to this file. | |
44 | |
45 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
46 SuspendibleThreadSet* _sts; | |
47 G1RemSet* _g1rs; | |
48 ConcurrentG1Refine* _cg1r; | |
49 bool _concurrent; | |
50 public: | |
51 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
52 G1RemSet* g1rs, | |
53 ConcurrentG1Refine* cg1r) : | |
54 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
55 {} | |
56 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
57 _g1rs->concurrentRefineOneCard(card_ptr, worker_i); | |
58 if (_concurrent && _sts->should_yield()) { | |
59 // Caller will actually yield. | |
60 return false; | |
61 } | |
62 // Otherwise, we finished successfully; return true. | |
63 return true; | |
64 } | |
65 void set_concurrent(bool b) { _concurrent = b; } | |
66 }; | |
67 | |
68 | |
69 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
70 int _calls; | |
71 G1CollectedHeap* _g1h; | |
72 CardTableModRefBS* _ctbs; | |
73 int _histo[256]; | |
74 public: | |
75 ClearLoggedCardTableEntryClosure() : | |
76 _calls(0) | |
77 { | |
78 _g1h = G1CollectedHeap::heap(); | |
79 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
80 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
81 } | |
82 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
83 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
84 _calls++; | |
85 unsigned char* ujb = (unsigned char*)card_ptr; | |
86 int ind = (int)(*ujb); | |
87 _histo[ind]++; | |
88 *card_ptr = -1; | |
89 } | |
90 return true; | |
91 } | |
92 int calls() { return _calls; } | |
93 void print_histo() { | |
94 gclog_or_tty->print_cr("Card table value histogram:"); | |
95 for (int i = 0; i < 256; i++) { | |
96 if (_histo[i] != 0) { | |
97 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
98 } | |
99 } | |
100 } | |
101 }; | |
102 | |
103 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
104 int _calls; | |
105 G1CollectedHeap* _g1h; | |
106 CardTableModRefBS* _ctbs; | |
107 public: | |
108 RedirtyLoggedCardTableEntryClosure() : | |
109 _calls(0) | |
110 { | |
111 _g1h = G1CollectedHeap::heap(); | |
112 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
113 } | |
114 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
115 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
116 _calls++; | |
117 *card_ptr = 0; | |
118 } | |
119 return true; | |
120 } | |
121 int calls() { return _calls; } | |
122 }; | |
123 | |
616 | 124 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
125 public: | |
126 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
127 *card_ptr = CardTableModRefBS::dirty_card_val(); | |
128 return true; | |
129 } | |
130 }; | |
131 | |
342 | 132 YoungList::YoungList(G1CollectedHeap* g1h) |
133 : _g1h(g1h), _head(NULL), | |
134 _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL), | |
135 _length(0), _scan_only_length(0), | |
136 _last_sampled_rs_lengths(0), | |
545 | 137 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
342 | 138 { |
139 guarantee( check_list_empty(false), "just making sure..." ); | |
140 } | |
141 | |
142 void YoungList::push_region(HeapRegion *hr) { | |
143 assert(!hr->is_young(), "should not already be young"); | |
144 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
145 | |
146 hr->set_next_young_region(_head); | |
147 _head = hr; | |
148 | |
149 hr->set_young(); | |
150 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
151 ++_length; | |
152 } | |
153 | |
154 void YoungList::add_survivor_region(HeapRegion* hr) { | |
545 | 155 assert(hr->is_survivor(), "should be flagged as survivor region"); |
342 | 156 assert(hr->get_next_young_region() == NULL, "cause it should!"); |
157 | |
158 hr->set_next_young_region(_survivor_head); | |
159 if (_survivor_head == NULL) { | |
545 | 160 _survivor_tail = hr; |
342 | 161 } |
162 _survivor_head = hr; | |
163 | |
164 ++_survivor_length; | |
165 } | |
166 | |
167 HeapRegion* YoungList::pop_region() { | |
168 while (_head != NULL) { | |
169 assert( length() > 0, "list should not be empty" ); | |
170 HeapRegion* ret = _head; | |
171 _head = ret->get_next_young_region(); | |
172 ret->set_next_young_region(NULL); | |
173 --_length; | |
174 assert(ret->is_young(), "region should be very young"); | |
175 | |
176 // Replace 'Survivor' region type with 'Young'. So the region will | |
177 // be treated as a young region and will not be 'confused' with | |
178 // newly created survivor regions. | |
179 if (ret->is_survivor()) { | |
180 ret->set_young(); | |
181 } | |
182 | |
183 if (!ret->is_scan_only()) { | |
184 return ret; | |
185 } | |
186 | |
187 // scan-only, we'll add it to the scan-only list | |
188 if (_scan_only_tail == NULL) { | |
189 guarantee( _scan_only_head == NULL, "invariant" ); | |
190 | |
191 _scan_only_head = ret; | |
192 _curr_scan_only = ret; | |
193 } else { | |
194 guarantee( _scan_only_head != NULL, "invariant" ); | |
195 _scan_only_tail->set_next_young_region(ret); | |
196 } | |
197 guarantee( ret->get_next_young_region() == NULL, "invariant" ); | |
198 _scan_only_tail = ret; | |
199 | |
200 // no need to be tagged as scan-only any more | |
201 ret->set_young(); | |
202 | |
203 ++_scan_only_length; | |
204 } | |
205 assert( length() == 0, "list should be empty" ); | |
206 return NULL; | |
207 } | |
208 | |
209 void YoungList::empty_list(HeapRegion* list) { | |
210 while (list != NULL) { | |
211 HeapRegion* next = list->get_next_young_region(); | |
212 list->set_next_young_region(NULL); | |
213 list->uninstall_surv_rate_group(); | |
214 list->set_not_young(); | |
215 list = next; | |
216 } | |
217 } | |
218 | |
219 void YoungList::empty_list() { | |
220 assert(check_list_well_formed(), "young list should be well formed"); | |
221 | |
222 empty_list(_head); | |
223 _head = NULL; | |
224 _length = 0; | |
225 | |
226 empty_list(_scan_only_head); | |
227 _scan_only_head = NULL; | |
228 _scan_only_tail = NULL; | |
229 _scan_only_length = 0; | |
230 _curr_scan_only = NULL; | |
231 | |
232 empty_list(_survivor_head); | |
233 _survivor_head = NULL; | |
545 | 234 _survivor_tail = NULL; |
342 | 235 _survivor_length = 0; |
236 | |
237 _last_sampled_rs_lengths = 0; | |
238 | |
239 assert(check_list_empty(false), "just making sure..."); | |
240 } | |
241 | |
242 bool YoungList::check_list_well_formed() { | |
243 bool ret = true; | |
244 | |
245 size_t length = 0; | |
246 HeapRegion* curr = _head; | |
247 HeapRegion* last = NULL; | |
248 while (curr != NULL) { | |
249 if (!curr->is_young() || curr->is_scan_only()) { | |
250 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " | |
251 "incorrectly tagged (%d, %d)", | |
252 curr->bottom(), curr->end(), | |
253 curr->is_young(), curr->is_scan_only()); | |
254 ret = false; | |
255 } | |
256 ++length; | |
257 last = curr; | |
258 curr = curr->get_next_young_region(); | |
259 } | |
260 ret = ret && (length == _length); | |
261 | |
262 if (!ret) { | |
263 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
264 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
265 length, _length); | |
266 } | |
267 | |
268 bool scan_only_ret = true; | |
269 length = 0; | |
270 curr = _scan_only_head; | |
271 last = NULL; | |
272 while (curr != NULL) { | |
273 if (!curr->is_young() || curr->is_scan_only()) { | |
274 gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" " | |
275 "incorrectly tagged (%d, %d)", | |
276 curr->bottom(), curr->end(), | |
277 curr->is_young(), curr->is_scan_only()); | |
278 scan_only_ret = false; | |
279 } | |
280 ++length; | |
281 last = curr; | |
282 curr = curr->get_next_young_region(); | |
283 } | |
284 scan_only_ret = scan_only_ret && (length == _scan_only_length); | |
285 | |
286 if ( (last != _scan_only_tail) || | |
287 (_scan_only_head == NULL && _scan_only_tail != NULL) || | |
288 (_scan_only_head != NULL && _scan_only_tail == NULL) ) { | |
289 gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly"); | |
290 scan_only_ret = false; | |
291 } | |
292 | |
293 if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) { | |
294 gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly"); | |
295 scan_only_ret = false; | |
296 } | |
297 | |
298 if (!scan_only_ret) { | |
299 gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!"); | |
300 gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d", | |
301 length, _scan_only_length); | |
302 } | |
303 | |
304 return ret && scan_only_ret; | |
305 } | |
306 | |
307 bool YoungList::check_list_empty(bool ignore_scan_only_list, | |
308 bool check_sample) { | |
309 bool ret = true; | |
310 | |
311 if (_length != 0) { | |
312 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
313 _length); | |
314 ret = false; | |
315 } | |
316 if (check_sample && _last_sampled_rs_lengths != 0) { | |
317 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
318 ret = false; | |
319 } | |
320 if (_head != NULL) { | |
321 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
322 ret = false; | |
323 } | |
324 if (!ret) { | |
325 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
326 } | |
327 | |
328 if (ignore_scan_only_list) | |
329 return ret; | |
330 | |
331 bool scan_only_ret = true; | |
332 if (_scan_only_length != 0) { | |
333 gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d", | |
334 _scan_only_length); | |
335 scan_only_ret = false; | |
336 } | |
337 if (_scan_only_head != NULL) { | |
338 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head"); | |
339 scan_only_ret = false; | |
340 } | |
341 if (_scan_only_tail != NULL) { | |
342 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail"); | |
343 scan_only_ret = false; | |
344 } | |
345 if (!scan_only_ret) { | |
346 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty"); | |
347 } | |
348 | |
349 return ret && scan_only_ret; | |
350 } | |
351 | |
352 void | |
353 YoungList::rs_length_sampling_init() { | |
354 _sampled_rs_lengths = 0; | |
355 _curr = _head; | |
356 } | |
357 | |
358 bool | |
359 YoungList::rs_length_sampling_more() { | |
360 return _curr != NULL; | |
361 } | |
362 | |
363 void | |
364 YoungList::rs_length_sampling_next() { | |
365 assert( _curr != NULL, "invariant" ); | |
366 _sampled_rs_lengths += _curr->rem_set()->occupied(); | |
367 _curr = _curr->get_next_young_region(); | |
368 if (_curr == NULL) { | |
369 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
370 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
371 } | |
372 } | |
373 | |
374 void | |
375 YoungList::reset_auxilary_lists() { | |
376 // We could have just "moved" the scan-only list to the young list. | |
377 // However, the scan-only list is ordered according to the region | |
378 // age in descending order, so, by moving one entry at a time, we | |
379 // ensure that it is recreated in ascending order. | |
380 | |
381 guarantee( is_empty(), "young list should be empty" ); | |
382 assert(check_list_well_formed(), "young list should be well formed"); | |
383 | |
384 // Add survivor regions to SurvRateGroup. | |
385 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
545 | 386 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
342 | 387 for (HeapRegion* curr = _survivor_head; |
388 curr != NULL; | |
389 curr = curr->get_next_young_region()) { | |
390 _g1h->g1_policy()->set_region_survivors(curr); | |
391 } | |
392 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
393 | |
394 if (_survivor_head != NULL) { | |
395 _head = _survivor_head; | |
396 _length = _survivor_length + _scan_only_length; | |
545 | 397 _survivor_tail->set_next_young_region(_scan_only_head); |
342 | 398 } else { |
399 _head = _scan_only_head; | |
400 _length = _scan_only_length; | |
401 } | |
402 | |
403 for (HeapRegion* curr = _scan_only_head; | |
404 curr != NULL; | |
405 curr = curr->get_next_young_region()) { | |
406 curr->recalculate_age_in_surv_rate_group(); | |
407 } | |
408 _scan_only_head = NULL; | |
409 _scan_only_tail = NULL; | |
410 _scan_only_length = 0; | |
411 _curr_scan_only = NULL; | |
412 | |
413 _survivor_head = NULL; | |
545 | 414 _survivor_tail = NULL; |
342 | 415 _survivor_length = 0; |
545 | 416 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
342 | 417 |
418 assert(check_list_well_formed(), "young list should be well formed"); | |
419 } | |
420 | |
421 void YoungList::print() { | |
422 HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head}; | |
423 const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"}; | |
424 | |
425 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
426 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
427 HeapRegion *curr = lists[list]; | |
428 if (curr == NULL) | |
429 gclog_or_tty->print_cr(" empty"); | |
430 while (curr != NULL) { | |
431 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
432 "age: %4d, y: %d, s-o: %d, surv: %d", | |
433 curr->bottom(), curr->end(), | |
434 curr->top(), | |
435 curr->prev_top_at_mark_start(), | |
436 curr->next_top_at_mark_start(), | |
437 curr->top_at_conc_mark_count(), | |
438 curr->age_in_surv_rate_group_cond(), | |
439 curr->is_young(), | |
440 curr->is_scan_only(), | |
441 curr->is_survivor()); | |
442 curr = curr->get_next_young_region(); | |
443 } | |
444 } | |
445 | |
446 gclog_or_tty->print_cr(""); | |
447 } | |
448 | |
796 | 449 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
450 { | |
451 // Claim the right to put the region on the dirty cards region list | |
452 // by installing a self pointer. | |
453 HeapRegion* next = hr->get_next_dirty_cards_region(); | |
454 if (next == NULL) { | |
455 HeapRegion* res = (HeapRegion*) | |
456 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), | |
457 NULL); | |
458 if (res == NULL) { | |
459 HeapRegion* head; | |
460 do { | |
461 // Put the region to the dirty cards region list. | |
462 head = _dirty_cards_region_list; | |
463 next = (HeapRegion*) | |
464 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); | |
465 if (next == head) { | |
466 assert(hr->get_next_dirty_cards_region() == hr, | |
467 "hr->get_next_dirty_cards_region() != hr"); | |
468 if (next == NULL) { | |
469 // The last region in the list points to itself. | |
470 hr->set_next_dirty_cards_region(hr); | |
471 } else { | |
472 hr->set_next_dirty_cards_region(next); | |
473 } | |
474 } | |
475 } while (next != head); | |
476 } | |
477 } | |
478 } | |
479 | |
480 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() | |
481 { | |
482 HeapRegion* head; | |
483 HeapRegion* hr; | |
484 do { | |
485 head = _dirty_cards_region_list; | |
486 if (head == NULL) { | |
487 return NULL; | |
488 } | |
489 HeapRegion* new_head = head->get_next_dirty_cards_region(); | |
490 if (head == new_head) { | |
491 // The last region. | |
492 new_head = NULL; | |
493 } | |
494 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, | |
495 head); | |
496 } while (hr != head); | |
497 assert(hr != NULL, "invariant"); | |
498 hr->set_next_dirty_cards_region(NULL); | |
499 return hr; | |
500 } | |
501 | |
342 | 502 void G1CollectedHeap::stop_conc_gc_threads() { |
794 | 503 _cg1r->stop(); |
342 | 504 _czft->stop(); |
505 _cmThread->stop(); | |
506 } | |
507 | |
508 | |
509 void G1CollectedHeap::check_ct_logs_at_safepoint() { | |
510 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
511 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
512 | |
513 // Count the dirty cards at the start. | |
514 CountNonCleanMemRegionClosure count1(this); | |
515 ct_bs->mod_card_iterate(&count1); | |
516 int orig_count = count1.n(); | |
517 | |
518 // First clear the logged cards. | |
519 ClearLoggedCardTableEntryClosure clear; | |
520 dcqs.set_closure(&clear); | |
521 dcqs.apply_closure_to_all_completed_buffers(); | |
522 dcqs.iterate_closure_all_threads(false); | |
523 clear.print_histo(); | |
524 | |
525 // Now ensure that there's no dirty cards. | |
526 CountNonCleanMemRegionClosure count2(this); | |
527 ct_bs->mod_card_iterate(&count2); | |
528 if (count2.n() != 0) { | |
529 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
530 count2.n(), orig_count); | |
531 } | |
532 guarantee(count2.n() == 0, "Card table should be clean."); | |
533 | |
534 RedirtyLoggedCardTableEntryClosure redirty; | |
535 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
536 dcqs.apply_closure_to_all_completed_buffers(); | |
537 dcqs.iterate_closure_all_threads(false); | |
538 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
539 clear.calls(), orig_count); | |
540 guarantee(redirty.calls() == clear.calls(), | |
541 "Or else mechanism is broken."); | |
542 | |
543 CountNonCleanMemRegionClosure count3(this); | |
544 ct_bs->mod_card_iterate(&count3); | |
545 if (count3.n() != orig_count) { | |
546 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
547 orig_count, count3.n()); | |
548 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
549 } | |
550 | |
551 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
552 } | |
553 | |
554 // Private class members. | |
555 | |
556 G1CollectedHeap* G1CollectedHeap::_g1h; | |
557 | |
558 // Private methods. | |
559 | |
560 // Finds a HeapRegion that can be used to allocate a given size of block. | |
561 | |
562 | |
563 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, | |
564 bool do_expand, | |
565 bool zero_filled) { | |
566 ConcurrentZFThread::note_region_alloc(); | |
567 HeapRegion* res = alloc_free_region_from_lists(zero_filled); | |
568 if (res == NULL && do_expand) { | |
569 expand(word_size * HeapWordSize); | |
570 res = alloc_free_region_from_lists(zero_filled); | |
571 assert(res == NULL || | |
572 (!res->isHumongous() && | |
573 (!zero_filled || | |
574 res->zero_fill_state() == HeapRegion::Allocated)), | |
575 "Alloc Regions must be zero filled (and non-H)"); | |
576 } | |
577 if (res != NULL && res->is_empty()) _free_regions--; | |
578 assert(res == NULL || | |
579 (!res->isHumongous() && | |
580 (!zero_filled || | |
581 res->zero_fill_state() == HeapRegion::Allocated)), | |
582 "Non-young alloc Regions must be zero filled (and non-H)"); | |
583 | |
751 | 584 if (G1PrintRegions) { |
342 | 585 if (res != NULL) { |
586 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
587 "top "PTR_FORMAT, | |
588 res->hrs_index(), res->bottom(), res->end(), res->top()); | |
589 } | |
590 } | |
591 | |
592 return res; | |
593 } | |
594 | |
595 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, | |
596 size_t word_size, | |
597 bool zero_filled) { | |
598 HeapRegion* alloc_region = NULL; | |
599 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
600 alloc_region = newAllocRegion_work(word_size, true, zero_filled); | |
601 if (purpose == GCAllocForSurvived && alloc_region != NULL) { | |
545 | 602 alloc_region->set_survivor(); |
342 | 603 } |
604 ++_gc_alloc_region_counts[purpose]; | |
605 } else { | |
606 g1_policy()->note_alloc_region_limit_reached(purpose); | |
607 } | |
608 return alloc_region; | |
609 } | |
610 | |
611 // If could fit into free regions w/o expansion, try. | |
612 // Otherwise, if can expand, do so. | |
613 // Otherwise, if using ex regions might help, try with ex given back. | |
614 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { | |
615 assert(regions_accounted_for(), "Region leakage!"); | |
616 | |
617 // We can't allocate H regions while cleanupComplete is running, since | |
618 // some of the regions we find to be empty might not yet be added to the | |
619 // unclean list. (If we're already at a safepoint, this call is | |
620 // unnecessary, not to mention wrong.) | |
621 if (!SafepointSynchronize::is_at_safepoint()) | |
622 wait_for_cleanup_complete(); | |
623 | |
624 size_t num_regions = | |
625 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; | |
626 | |
627 // Special case if < one region??? | |
628 | |
629 // Remember the ft size. | |
630 size_t x_size = expansion_regions(); | |
631 | |
632 HeapWord* res = NULL; | |
633 bool eliminated_allocated_from_lists = false; | |
634 | |
635 // Can the allocation potentially fit in the free regions? | |
636 if (free_regions() >= num_regions) { | |
637 res = _hrs->obj_allocate(word_size); | |
638 } | |
639 if (res == NULL) { | |
640 // Try expansion. | |
641 size_t fs = _hrs->free_suffix(); | |
642 if (fs + x_size >= num_regions) { | |
643 expand((num_regions - fs) * HeapRegion::GrainBytes); | |
644 res = _hrs->obj_allocate(word_size); | |
645 assert(res != NULL, "This should have worked."); | |
646 } else { | |
647 // Expansion won't help. Are there enough free regions if we get rid | |
648 // of reservations? | |
649 size_t avail = free_regions(); | |
650 if (avail >= num_regions) { | |
651 res = _hrs->obj_allocate(word_size); | |
652 if (res != NULL) { | |
653 remove_allocated_regions_from_lists(); | |
654 eliminated_allocated_from_lists = true; | |
655 } | |
656 } | |
657 } | |
658 } | |
659 if (res != NULL) { | |
660 // Increment by the number of regions allocated. | |
661 // FIXME: Assumes regions all of size GrainBytes. | |
662 #ifndef PRODUCT | |
663 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * | |
664 HeapRegion::GrainWords)); | |
665 #endif | |
666 if (!eliminated_allocated_from_lists) | |
667 remove_allocated_regions_from_lists(); | |
668 _summary_bytes_used += word_size * HeapWordSize; | |
669 _free_regions -= num_regions; | |
670 _num_humongous_regions += (int) num_regions; | |
671 } | |
672 assert(regions_accounted_for(), "Region Leakage"); | |
673 return res; | |
674 } | |
675 | |
676 HeapWord* | |
677 G1CollectedHeap::attempt_allocation_slow(size_t word_size, | |
678 bool permit_collection_pause) { | |
679 HeapWord* res = NULL; | |
680 HeapRegion* allocated_young_region = NULL; | |
681 | |
682 assert( SafepointSynchronize::is_at_safepoint() || | |
683 Heap_lock->owned_by_self(), "pre condition of the call" ); | |
684 | |
685 if (isHumongous(word_size)) { | |
686 // Allocation of a humongous object can, in a sense, complete a | |
687 // partial region, if the previous alloc was also humongous, and | |
688 // caused the test below to succeed. | |
689 if (permit_collection_pause) | |
690 do_collection_pause_if_appropriate(word_size); | |
691 res = humongousObjAllocate(word_size); | |
692 assert(_cur_alloc_region == NULL | |
693 || !_cur_alloc_region->isHumongous(), | |
694 "Prevent a regression of this bug."); | |
695 | |
696 } else { | |
354 | 697 // We may have concurrent cleanup working at the time. Wait for it |
698 // to complete. In the future we would probably want to make the | |
699 // concurrent cleanup truly concurrent by decoupling it from the | |
700 // allocation. | |
701 if (!SafepointSynchronize::is_at_safepoint()) | |
702 wait_for_cleanup_complete(); | |
342 | 703 // If we do a collection pause, this will be reset to a non-NULL |
704 // value. If we don't, nulling here ensures that we allocate a new | |
705 // region below. | |
706 if (_cur_alloc_region != NULL) { | |
707 // We're finished with the _cur_alloc_region. | |
708 _summary_bytes_used += _cur_alloc_region->used(); | |
709 _cur_alloc_region = NULL; | |
710 } | |
711 assert(_cur_alloc_region == NULL, "Invariant."); | |
712 // Completion of a heap region is perhaps a good point at which to do | |
713 // a collection pause. | |
714 if (permit_collection_pause) | |
715 do_collection_pause_if_appropriate(word_size); | |
716 // Make sure we have an allocation region available. | |
717 if (_cur_alloc_region == NULL) { | |
718 if (!SafepointSynchronize::is_at_safepoint()) | |
719 wait_for_cleanup_complete(); | |
720 bool next_is_young = should_set_young_locked(); | |
721 // If the next region is not young, make sure it's zero-filled. | |
722 _cur_alloc_region = newAllocRegion(word_size, !next_is_young); | |
723 if (_cur_alloc_region != NULL) { | |
724 _summary_bytes_used -= _cur_alloc_region->used(); | |
725 if (next_is_young) { | |
726 set_region_short_lived_locked(_cur_alloc_region); | |
727 allocated_young_region = _cur_alloc_region; | |
728 } | |
729 } | |
730 } | |
731 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), | |
732 "Prevent a regression of this bug."); | |
733 | |
734 // Now retry the allocation. | |
735 if (_cur_alloc_region != NULL) { | |
736 res = _cur_alloc_region->allocate(word_size); | |
737 } | |
738 } | |
739 | |
740 // NOTE: fails frequently in PRT | |
741 assert(regions_accounted_for(), "Region leakage!"); | |
742 | |
743 if (res != NULL) { | |
744 if (!SafepointSynchronize::is_at_safepoint()) { | |
745 assert( permit_collection_pause, "invariant" ); | |
746 assert( Heap_lock->owned_by_self(), "invariant" ); | |
747 Heap_lock->unlock(); | |
748 } | |
749 | |
750 if (allocated_young_region != NULL) { | |
751 HeapRegion* hr = allocated_young_region; | |
752 HeapWord* bottom = hr->bottom(); | |
753 HeapWord* end = hr->end(); | |
754 MemRegion mr(bottom, end); | |
755 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); | |
756 } | |
757 } | |
758 | |
759 assert( SafepointSynchronize::is_at_safepoint() || | |
760 (res == NULL && Heap_lock->owned_by_self()) || | |
761 (res != NULL && !Heap_lock->owned_by_self()), | |
762 "post condition of the call" ); | |
763 | |
764 return res; | |
765 } | |
766 | |
767 HeapWord* | |
768 G1CollectedHeap::mem_allocate(size_t word_size, | |
769 bool is_noref, | |
770 bool is_tlab, | |
771 bool* gc_overhead_limit_was_exceeded) { | |
772 debug_only(check_for_valid_allocation_state()); | |
773 assert(no_gc_in_progress(), "Allocation during gc not allowed"); | |
774 HeapWord* result = NULL; | |
775 | |
776 // Loop until the allocation is satisfied, | |
777 // or unsatisfied after GC. | |
778 for (int try_count = 1; /* return or throw */; try_count += 1) { | |
779 int gc_count_before; | |
780 { | |
781 Heap_lock->lock(); | |
782 result = attempt_allocation(word_size); | |
783 if (result != NULL) { | |
784 // attempt_allocation should have unlocked the heap lock | |
785 assert(is_in(result), "result not in heap"); | |
786 return result; | |
787 } | |
788 // Read the gc count while the heap lock is held. | |
789 gc_count_before = SharedHeap::heap()->total_collections(); | |
790 Heap_lock->unlock(); | |
791 } | |
792 | |
793 // Create the garbage collection operation... | |
794 VM_G1CollectForAllocation op(word_size, | |
795 gc_count_before); | |
796 | |
797 // ...and get the VM thread to execute it. | |
798 VMThread::execute(&op); | |
799 if (op.prologue_succeeded()) { | |
800 result = op.result(); | |
801 assert(result == NULL || is_in(result), "result not in heap"); | |
802 return result; | |
803 } | |
804 | |
805 // Give a warning if we seem to be looping forever. | |
806 if ((QueuedAllocationWarningCount > 0) && | |
807 (try_count % QueuedAllocationWarningCount == 0)) { | |
808 warning("G1CollectedHeap::mem_allocate_work retries %d times", | |
809 try_count); | |
810 } | |
811 } | |
812 } | |
813 | |
814 void G1CollectedHeap::abandon_cur_alloc_region() { | |
815 if (_cur_alloc_region != NULL) { | |
816 // We're finished with the _cur_alloc_region. | |
817 if (_cur_alloc_region->is_empty()) { | |
818 _free_regions++; | |
819 free_region(_cur_alloc_region); | |
820 } else { | |
821 _summary_bytes_used += _cur_alloc_region->used(); | |
822 } | |
823 _cur_alloc_region = NULL; | |
824 } | |
825 } | |
826 | |
636 | 827 void G1CollectedHeap::abandon_gc_alloc_regions() { |
828 // first, make sure that the GC alloc region list is empty (it should!) | |
829 assert(_gc_alloc_region_list == NULL, "invariant"); | |
830 release_gc_alloc_regions(true /* totally */); | |
831 } | |
832 | |
342 | 833 class PostMCRemSetClearClosure: public HeapRegionClosure { |
834 ModRefBarrierSet* _mr_bs; | |
835 public: | |
836 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
837 bool doHeapRegion(HeapRegion* r) { | |
838 r->reset_gc_time_stamp(); | |
839 if (r->continuesHumongous()) | |
840 return false; | |
841 HeapRegionRemSet* hrrs = r->rem_set(); | |
842 if (hrrs != NULL) hrrs->clear(); | |
843 // You might think here that we could clear just the cards | |
844 // corresponding to the used region. But no: if we leave a dirty card | |
845 // in a region we might allocate into, then it would prevent that card | |
846 // from being enqueued, and cause it to be missed. | |
847 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
848 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
849 return false; | |
850 } | |
851 }; | |
852 | |
853 | |
854 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
855 ModRefBarrierSet* _mr_bs; | |
856 public: | |
857 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
858 bool doHeapRegion(HeapRegion* r) { | |
859 if (r->continuesHumongous()) return false; | |
860 if (r->used_region().word_size() != 0) { | |
861 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
862 } | |
863 return false; | |
864 } | |
865 }; | |
866 | |
626 | 867 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
868 G1CollectedHeap* _g1h; | |
869 UpdateRSOopClosure _cl; | |
870 int _worker_i; | |
871 public: | |
872 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : | |
873 _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i), | |
874 _worker_i(worker_i), | |
875 _g1h(g1) | |
876 { } | |
877 bool doHeapRegion(HeapRegion* r) { | |
878 if (!r->continuesHumongous()) { | |
879 _cl.set_from(r); | |
880 r->oop_iterate(&_cl); | |
881 } | |
882 return false; | |
883 } | |
884 }; | |
885 | |
886 class ParRebuildRSTask: public AbstractGangTask { | |
887 G1CollectedHeap* _g1; | |
888 public: | |
889 ParRebuildRSTask(G1CollectedHeap* g1) | |
890 : AbstractGangTask("ParRebuildRSTask"), | |
891 _g1(g1) | |
892 { } | |
893 | |
894 void work(int i) { | |
895 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); | |
896 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, | |
897 HeapRegion::RebuildRSClaimValue); | |
898 } | |
899 }; | |
900 | |
342 | 901 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs, |
902 size_t word_size) { | |
903 ResourceMark rm; | |
904 | |
838 | 905 if (PrintHeapAtGC) { |
906 Universe::print_heap_before_gc(); | |
907 } | |
908 | |
342 | 909 if (full && DisableExplicitGC) { |
910 gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n"); | |
911 return; | |
912 } | |
913 | |
914 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
915 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
916 | |
917 if (GC_locker::is_active()) { | |
918 return; // GC is disabled (e.g. JNI GetXXXCritical operation) | |
919 } | |
920 | |
921 { | |
922 IsGCActiveMark x; | |
923 | |
924 // Timing | |
925 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
926 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
927 TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); | |
928 | |
929 double start = os::elapsedTime(); | |
930 GCOverheadReporter::recordSTWStart(start); | |
931 g1_policy()->record_full_collection_start(); | |
932 | |
933 gc_prologue(true); | |
838 | 934 increment_total_collections(true /* full gc */); |
342 | 935 |
936 size_t g1h_prev_used = used(); | |
937 assert(used() == recalculate_used(), "Should be equal"); | |
938 | |
939 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
940 HandleMark hm; // Discard invalid handles created during verification | |
941 prepare_for_verify(); | |
942 gclog_or_tty->print(" VerifyBeforeGC:"); | |
943 Universe::verify(true); | |
944 } | |
945 assert(regions_accounted_for(), "Region leakage!"); | |
946 | |
947 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
948 | |
949 // We want to discover references, but not process them yet. | |
950 // This mode is disabled in | |
951 // instanceRefKlass::process_discovered_references if the | |
952 // generation does some collection work, or | |
953 // instanceRefKlass::enqueue_discovered_references if the | |
954 // generation returns without doing any work. | |
955 ref_processor()->disable_discovery(); | |
956 ref_processor()->abandon_partial_discovery(); | |
957 ref_processor()->verify_no_references_recorded(); | |
958 | |
959 // Abandon current iterations of concurrent marking and concurrent | |
960 // refinement, if any are in progress. | |
961 concurrent_mark()->abort(); | |
962 | |
963 // Make sure we'll choose a new allocation region afterwards. | |
964 abandon_cur_alloc_region(); | |
636 | 965 abandon_gc_alloc_regions(); |
342 | 966 assert(_cur_alloc_region == NULL, "Invariant."); |
967 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); | |
968 tear_down_region_lists(); | |
969 set_used_regions_to_need_zero_fill(); | |
970 if (g1_policy()->in_young_gc_mode()) { | |
971 empty_young_list(); | |
972 g1_policy()->set_full_young_gcs(true); | |
973 } | |
974 | |
975 // Temporarily make reference _discovery_ single threaded (non-MT). | |
976 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); | |
977 | |
978 // Temporarily make refs discovery atomic | |
979 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
980 | |
981 // Temporarily clear _is_alive_non_header | |
982 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
983 | |
984 ref_processor()->enable_discovery(); | |
457 | 985 ref_processor()->setup_policy(clear_all_soft_refs); |
342 | 986 |
987 // Do collection work | |
988 { | |
989 HandleMark hm; // Discard invalid handles created during gc | |
990 G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); | |
991 } | |
992 // Because freeing humongous regions may have added some unclean | |
993 // regions, it is necessary to tear down again before rebuilding. | |
994 tear_down_region_lists(); | |
995 rebuild_region_lists(); | |
996 | |
997 _summary_bytes_used = recalculate_used(); | |
998 | |
999 ref_processor()->enqueue_discovered_references(); | |
1000 | |
1001 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
1002 | |
1003 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { | |
1004 HandleMark hm; // Discard invalid handles created during verification | |
1005 gclog_or_tty->print(" VerifyAfterGC:"); | |
637 | 1006 prepare_for_verify(); |
342 | 1007 Universe::verify(false); |
1008 } | |
1009 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
1010 | |
1011 reset_gc_time_stamp(); | |
1012 // Since everything potentially moved, we will clear all remembered | |
626 | 1013 // sets, and clear all cards. Later we will rebuild remembered |
1014 // sets. We will also reset the GC time stamps of the regions. | |
342 | 1015 PostMCRemSetClearClosure rs_clear(mr_bs()); |
1016 heap_region_iterate(&rs_clear); | |
1017 | |
1018 // Resize the heap if necessary. | |
1019 resize_if_necessary_after_full_collection(full ? 0 : word_size); | |
1020 | |
1021 if (_cg1r->use_cache()) { | |
1022 _cg1r->clear_and_record_card_counts(); | |
1023 _cg1r->clear_hot_cache(); | |
1024 } | |
1025 | |
626 | 1026 // Rebuild remembered sets of all regions. |
1027 if (ParallelGCThreads > 0) { | |
1028 ParRebuildRSTask rebuild_rs_task(this); | |
1029 assert(check_heap_region_claim_values( | |
1030 HeapRegion::InitialClaimValue), "sanity check"); | |
1031 set_par_threads(workers()->total_workers()); | |
1032 workers()->run_task(&rebuild_rs_task); | |
1033 set_par_threads(0); | |
1034 assert(check_heap_region_claim_values( | |
1035 HeapRegion::RebuildRSClaimValue), "sanity check"); | |
1036 reset_heap_region_claim_values(); | |
1037 } else { | |
1038 RebuildRSOutOfRegionClosure rebuild_rs(this); | |
1039 heap_region_iterate(&rebuild_rs); | |
1040 } | |
1041 | |
342 | 1042 if (PrintGC) { |
1043 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
1044 } | |
1045 | |
1046 if (true) { // FIXME | |
1047 // Ask the permanent generation to adjust size for full collections | |
1048 perm()->compute_new_size(); | |
1049 } | |
1050 | |
1051 double end = os::elapsedTime(); | |
1052 GCOverheadReporter::recordSTWEnd(end); | |
1053 g1_policy()->record_full_collection_end(); | |
1054 | |
546 | 1055 #ifdef TRACESPINNING |
1056 ParallelTaskTerminator::print_termination_counts(); | |
1057 #endif | |
1058 | |
342 | 1059 gc_epilogue(true); |
1060 | |
794 | 1061 // Discard all rset updates |
1062 JavaThread::dirty_card_queue_set().abandon_logs(); | |
616 | 1063 assert(!G1DeferredRSUpdate |
1064 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); | |
342 | 1065 assert(regions_accounted_for(), "Region leakage!"); |
1066 } | |
1067 | |
1068 if (g1_policy()->in_young_gc_mode()) { | |
1069 _young_list->reset_sampled_info(); | |
1070 assert( check_young_list_empty(false, false), | |
1071 "young list should be empty at this point"); | |
1072 } | |
838 | 1073 |
1074 if (PrintHeapAtGC) { | |
1075 Universe::print_heap_after_gc(); | |
1076 } | |
342 | 1077 } |
1078 | |
1079 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
1080 do_collection(true, clear_all_soft_refs, 0); | |
1081 } | |
1082 | |
1083 // This code is mostly copied from TenuredGeneration. | |
1084 void | |
1085 G1CollectedHeap:: | |
1086 resize_if_necessary_after_full_collection(size_t word_size) { | |
1087 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
1088 | |
1089 // Include the current allocation, if any, and bytes that will be | |
1090 // pre-allocated to support collections, as "used". | |
1091 const size_t used_after_gc = used(); | |
1092 const size_t capacity_after_gc = capacity(); | |
1093 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
1094 | |
1095 // We don't have floating point command-line arguments | |
1096 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100; | |
1097 const double maximum_used_percentage = 1.0 - minimum_free_percentage; | |
1098 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; | |
1099 const double minimum_used_percentage = 1.0 - maximum_free_percentage; | |
1100 | |
1101 size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage); | |
1102 size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage); | |
1103 | |
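// [Editor's note] A worked instance of the sizing arithmetic above, assuming
// the defaults of this era, MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70:
// with used_after_gc = 600M, maximum_used_percentage = 0.60 gives
// minimum_desired_capacity = 600M / 0.60 = 1000M, and
// minimum_used_percentage = 0.30 gives
// maximum_desired_capacity = 600M / 0.30 = 2000M. The code below then
// expands or shrinks only if capacity() falls outside [1000M, 2000M].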
1104 // Don't shrink less than the initial size. | |
1105 minimum_desired_capacity = | |
1106 MAX2(minimum_desired_capacity, | |
1107 collector_policy()->initial_heap_byte_size()); | |
1108 maximum_desired_capacity = | |
1109 MAX2(maximum_desired_capacity, | |
1110 collector_policy()->initial_heap_byte_size()); | |
1111 | |
1112 // The asserts below hold: minimum_desired_capacity is used_after_gc / maximum_used_percentage, with maximum_used_percentage <= 1.0, so it can never be below used_after_gc. | |
1113 assert(used_after_gc <= minimum_desired_capacity, "sanity check"); | |
1114 assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check"); | |
1115 | |
1116 if (PrintGC && Verbose) { | |
1117 const double free_percentage = ((double)free_after_gc) / capacity(); | |
1118 gclog_or_tty->print_cr("Computing new size after full GC "); | |
1119 gclog_or_tty->print_cr(" " | |
1120 " minimum_free_percentage: %6.2f", | |
1121 minimum_free_percentage); | |
1122 gclog_or_tty->print_cr(" " | |
1123 " maximum_free_percentage: %6.2f", | |
1124 maximum_free_percentage); | |
1125 gclog_or_tty->print_cr(" " | |
1126 " capacity: %6.1fK" | |
1127 " minimum_desired_capacity: %6.1fK" | |
1128 " maximum_desired_capacity: %6.1fK", | |
1129 capacity() / (double) K, | |
1130 minimum_desired_capacity / (double) K, | |
1131 maximum_desired_capacity / (double) K); | |
1132 gclog_or_tty->print_cr(" " | |
1133 " free_after_gc : %6.1fK" | |
1134 " used_after_gc : %6.1fK", | |
1135 free_after_gc / (double) K, | |
1136 used_after_gc / (double) K); | |
1137 gclog_or_tty->print_cr(" " | |
1138 " free_percentage: %6.2f", | |
1139 free_percentage); | |
1140 } | |
1141 if (capacity() < minimum_desired_capacity) { | |
1142 // Don't expand unless it's significant | |
1143 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
1144 expand(expand_bytes); | |
1145 if (PrintGC && Verbose) { | |
1146 gclog_or_tty->print_cr(" expanding:" | |
1147 " minimum_desired_capacity: %6.1fK" | |
1148 " expand_bytes: %6.1fK", | |
1149 minimum_desired_capacity / (double) K, | |
1150 expand_bytes / (double) K); | |
1151 } | |
1152 | |
1153 // No expansion, now see if we want to shrink | |
1154 } else if (capacity() > maximum_desired_capacity) { | |
1155 // Capacity too large, compute shrinking size | |
1156 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1157 shrink(shrink_bytes); | |
1158 if (PrintGC && Verbose) { | |
1159 gclog_or_tty->print_cr(" " | |
1160 " shrinking:" | |
1161 " initSize: %.1fK" | |
1162 " maximum_desired_capacity: %.1fK", | |
1163 collector_policy()->initial_heap_byte_size() / (double) K, | |
1164 maximum_desired_capacity / (double) K); | |
1165 gclog_or_tty->print_cr(" " | |
1166 " shrink_bytes: %.1fK", | |
1167 shrink_bytes / (double) K); | |
1168 } | |
1169 } | |
1170 } | |
1171 | |
1172 | |
1173 HeapWord* | |
1174 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { | |
1175 HeapWord* result = NULL; | |
1176 | |
1177 // In a G1 heap, we're supposed to keep allocation from failing by | |
1178 // incremental pauses. Therefore, at least for now, we'll favor | |
1179 // expansion over collection. (This might change in the future if we can | |
1180 // do something smarter than full collection to satisfy a failed alloc.) | |
1181 | |
1182 result = expand_and_allocate(word_size); | |
1183 if (result != NULL) { | |
1184 assert(is_in(result), "result not in heap"); | |
1185 return result; | |
1186 } | |
1187 | |
1188 // OK, I guess we have to try collection. | |
1189 | |
1190 do_collection(false, false, word_size); | |
1191 | |
1192 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1193 | |
1194 if (result != NULL) { | |
1195 assert(is_in(result), "result not in heap"); | |
1196 return result; | |
1197 } | |
1198 | |
1199 // Try collecting soft references. | |
1200 do_collection(false, true, word_size); | |
1201 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1202 if (result != NULL) { | |
1203 assert(is_in(result), "result not in heap"); | |
1204 return result; | |
1205 } | |
1206 | |
1207 // What else? We might try synchronous finalization later. If the total | |
1208 // space available is large enough for the allocation, then a more | |
1209 // complete compaction phase than we've tried so far might be | |
1210 // appropriate. | |
1211 return NULL; | |
1212 } | |
1213 | |
1214 // Attempts to expand the heap sufficiently | |
1215 // to support an allocation of the given "word_size". If | |
1216 // successful, performs the allocation and returns the address of the | |
1217 // allocated block, or else "NULL". | |
1218 | |
1219 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
1220 size_t expand_bytes = word_size * HeapWordSize; | |
1221 if (expand_bytes < MinHeapDeltaBytes) { | |
1222 expand_bytes = MinHeapDeltaBytes; | |
1223 } | |
1224 expand(expand_bytes); | |
1225 assert(regions_accounted_for(), "Region leakage!"); | |
1226 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); | |
1227 return result; | |
1228 } | |
1229 | |
1230 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { | |
1231 size_t pre_used = 0; | |
1232 size_t cleared_h_regions = 0; | |
1233 size_t freed_regions = 0; | |
1234 UncleanRegionList local_list; | |
1235 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, | |
1236 freed_regions, &local_list); | |
1237 | |
1238 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
1239 &local_list); | |
1240 return pre_used; | |
1241 } | |
1242 | |
1243 void | |
1244 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, | |
1245 size_t& pre_used, | |
1246 size_t& cleared_h, | |
1247 size_t& freed_regions, | |
1248 UncleanRegionList* list, | |
1249 bool par) { | |
1250 assert(!hr->continuesHumongous(), "should have filtered these out"); | |
1251 size_t res = 0; | |
677 | 1252 if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && |
1253 !hr->is_young()) { | |
1254 if (G1PolicyVerbose > 0) | |
1255 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" | |
1256 " during cleanup", hr, hr->used()); | |
1257 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); | |
342 | 1258 } |
1259 } | |
1260 | |
1261 // FIXME: both this and shrink could probably be more efficient by | |
1262 // doing one "VirtualSpace::expand_by" call rather than several. | |
1263 void G1CollectedHeap::expand(size_t expand_bytes) { | |
1264 size_t old_mem_size = _g1_storage.committed_size(); | |
1265 // We expand by a minimum of 1K. | |
1266 expand_bytes = MAX2(expand_bytes, (size_t)K); | |
1267 size_t aligned_expand_bytes = | |
1268 ReservedSpace::page_align_size_up(expand_bytes); | |
1269 aligned_expand_bytes = align_size_up(aligned_expand_bytes, | |
1270 HeapRegion::GrainBytes); | |
1271 expand_bytes = aligned_expand_bytes; | |
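// [Editor's note] For example, assuming 4K pages and HeapRegion::GrainBytes = 1M:
// a request of 600K is page-aligned up to 600K and then rounded up to the
// region granularity, so aligned_expand_bytes = 1M and the loop below commits
// exactly one new region.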
1272 while (expand_bytes > 0) { | |
1273 HeapWord* base = (HeapWord*)_g1_storage.high(); | |
1274 // Commit more storage. | |
1275 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); | |
1276 if (!successful) { | |
1277 expand_bytes = 0; | |
1278 } else { | |
1279 expand_bytes -= HeapRegion::GrainBytes; | |
1280 // Expand the committed region. | |
1281 HeapWord* high = (HeapWord*) _g1_storage.high(); | |
1282 _g1_committed.set_end(high); | |
1283 // Create a new HeapRegion. | |
1284 MemRegion mr(base, high); | |
1285 bool is_zeroed = !_g1_max_committed.contains(base); | |
1286 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); | |
1287 | |
1288 // Now update max_committed if necessary. | |
1289 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); | |
1290 | |
1291 // Add it to the HeapRegionSeq. | |
1292 _hrs->insert(hr); | |
1293 // Set the zero-fill state, according to whether it's already | |
1294 // zeroed. | |
1295 { | |
1296 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
1297 if (is_zeroed) { | |
1298 hr->set_zero_fill_complete(); | |
1299 put_free_region_on_list_locked(hr); | |
1300 } else { | |
1301 hr->set_zero_fill_needed(); | |
1302 put_region_on_unclean_list_locked(hr); | |
1303 } | |
1304 } | |
1305 _free_regions++; | |
1306 // And we used up an expansion region to create it. | |
1307 _expansion_regions--; | |
1308 // Tell the cardtable about it. | |
1309 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1310 // And the offset table as well. | |
1311 _bot_shared->resize(_g1_committed.word_size()); | |
1312 } | |
1313 } | |
1314 if (Verbose && PrintGC) { | |
1315 size_t new_mem_size = _g1_storage.committed_size(); | |
1316 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", | |
1317 old_mem_size/K, aligned_expand_bytes/K, | |
1318 new_mem_size/K); | |
1319 } | |
1320 } | |
1321 | |
1322 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) | |
1323 { | |
1324 size_t old_mem_size = _g1_storage.committed_size(); | |
1325 size_t aligned_shrink_bytes = | |
1326 ReservedSpace::page_align_size_down(shrink_bytes); | |
1327 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1328 HeapRegion::GrainBytes); | |
1329 size_t num_regions_deleted = 0; | |
1330 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); | |
1331 | |
1332 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1333 if (mr.byte_size() > 0) | |
1334 _g1_storage.shrink_by(mr.byte_size()); | |
1335 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1336 | |
1337 _g1_committed.set_end(mr.start()); | |
1338 _free_regions -= num_regions_deleted; | |
1339 _expansion_regions += num_regions_deleted; | |
1340 | |
1341 // Tell the cardtable about it. | |
1342 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1343 | |
1344 // And the offset table as well. | |
1345 _bot_shared->resize(_g1_committed.word_size()); | |
1346 | |
1347 HeapRegionRemSet::shrink_heap(n_regions()); | |
1348 | |
1349 if (Verbose && PrintGC) { | |
1350 size_t new_mem_size = _g1_storage.committed_size(); | |
1351 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1352 old_mem_size/K, aligned_shrink_bytes/K, | |
1353 new_mem_size/K); | |
1354 } | |
1355 } | |
1356 | |
1357 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
636 | 1358 release_gc_alloc_regions(true /* totally */); |
342 | 1359 tear_down_region_lists(); // We will rebuild them in a moment. |
1360 shrink_helper(shrink_bytes); | |
1361 rebuild_region_lists(); | |
1362 } | |
1363 | |
1364 // Public methods. | |
1365 | |
1366 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1367 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1368 #endif // _MSC_VER | |
1369 | |
1370 | |
1371 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1372 SharedHeap(policy_), | |
1373 _g1_policy(policy_), | |
1374 _ref_processor(NULL), | |
1375 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1376 _bot_shared(NULL), | |
1377 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), | |
1378 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1379 _evac_failure_scan_stack(NULL) , | |
1380 _mark_in_progress(false), | |
1381 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), | |
1382 _cur_alloc_region(NULL), | |
1383 _refine_cte_cl(NULL), | |
1384 _free_region_list(NULL), _free_region_list_size(0), | |
1385 _free_regions(0), | |
1386 _full_collection(false), | |
1387 _unclean_region_list(), | |
1388 _unclean_regions_coming(false), | |
1389 _young_list(new YoungList(this)), | |
1390 _gc_time_stamp(0), | |
526 | 1391 _surviving_young_words(NULL), |
1392 _in_cset_fast_test(NULL), | |
796 | 1393 _in_cset_fast_test_base(NULL), |
1394 _dirty_cards_region_list(NULL) { | |
342 | 1395 _g1h = this; // To catch bugs. |
1396 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1397 vm_exit_during_initialization("Failed necessary allocation."); | |
1398 } | |
1399 int n_queues = MAX2((int)ParallelGCThreads, 1); | |
1400 _task_queues = new RefToScanQueueSet(n_queues); | |
1401 | |
1402 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1403 assert(n_rem_sets > 0, "Invariant."); | |
1404 | |
1405 HeapRegionRemSetIterator** iter_arr = | |
1406 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1407 for (int i = 0; i < n_queues; i++) { | |
1408 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1409 } | |
1410 _rem_set_iterator = iter_arr; | |
1411 | |
1412 for (int i = 0; i < n_queues; i++) { | |
1413 RefToScanQueue* q = new RefToScanQueue(); | |
1414 q->initialize(); | |
1415 _task_queues->register_queue(i, q); | |
1416 } | |
1417 | |
1418 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
636 | 1419 _gc_alloc_regions[ap] = NULL; |
1420 _gc_alloc_region_counts[ap] = 0; | |
1421 _retained_gc_alloc_regions[ap] = NULL; | |
1422 // by default, we do not retain a GC alloc region for each ap; | |
1423 // we'll override this, when appropriate, below | |
1424 _retain_gc_alloc_region[ap] = false; | |
1425 } | |
1426 | |
1427 // We will try to remember the last half-full tenured region we | |
1428 // allocated to at the end of a collection so that we can re-use it | |
1429 // during the next collection. | |
1430 _retain_gc_alloc_region[GCAllocForTenured] = true; | |
1431 | |
342 | 1432 guarantee(_task_queues != NULL, "task_queues allocation failure."); |
1433 } | |
1434 | |
1435 jint G1CollectedHeap::initialize() { | |
1436 os::enable_vtime(); | |
1437 | |
1438 // Necessary to satisfy locking discipline assertions. | |
1439 | |
1440 MutexLocker x(Heap_lock); | |
1441 | |
1442 // While there are no constraints in the GC code that HeapWordSize | |
1443 // be any particular value, there are multiple other areas in the | |
1444 // system which believe this to be true (e.g. oop->object_size in some | |
1445 // cases incorrectly returns the size in wordSize units rather than | |
1446 // HeapWordSize). | |
1447 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1448 | |
1449 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1450 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1451 | |
1452 // Ensure that the sizes are properly aligned. | |
1453 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1454 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1455 | |
1456 // We allocate this in any case, but only do no work if the command line | |
1457 // param is off. | |
1458 _cg1r = new ConcurrentG1Refine(); | |
1459 | |
1460 // Reserve the maximum. | |
1461 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1462 // Includes the perm-gen. | |
642 | 1463 |
1464 const size_t total_reserved = max_byte_size + pgs->max_size(); | |
1465 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); | |
1466 | |
342 | 1467 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
1468 HeapRegion::GrainBytes, | |
642 | 1469 false /*ism*/, addr); |
1470 | |
1471 if (UseCompressedOops) { | |
1472 if (addr != NULL && !heap_rs.is_reserved()) { | |
1473 // Failed to reserve at specified address - the requested memory | |
1474 // region is taken already, for example, by 'java' launcher. | |
1475 // Try again to reserve the heap higher. | |
1476 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); | |
1477 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, | |
1478 false /*ism*/, addr); | |
1479 if (addr != NULL && !heap_rs0.is_reserved()) { | |
1480 // Failed to reserve at specified address again - give up. | |
1481 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); | |
1482 assert(addr == NULL, ""); | |
1483 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, | |
1484 false /*ism*/, addr); | |
1485 heap_rs = heap_rs1; | |
1486 } else { | |
1487 heap_rs = heap_rs0; | |
1488 } | |
1489 } | |
1490 } | |
342 | 1491 |
1492 if (!heap_rs.is_reserved()) { | |
1493 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1494 return JNI_ENOMEM; | |
1495 } | |
1496 | |
1497 // It is important to do this in a way such that concurrent readers can't | |
1498 // temporarily think something is in the heap. (I've actually seen this | |
1499 // happen in asserts: DLD.) | |
1500 _reserved.set_word_size(0); | |
1501 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1502 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1503 | |
1504 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1505 | |
1506 _num_humongous_regions = 0; | |
1507 | |
1508 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1509 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1510 set_barrier_set(rem_set()->bs()); | |
1511 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1512 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1513 } else { | |
1514 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1515 return JNI_ENOMEM; | |
1516 } | |
1517 | |
1518 // Also create a G1 rem set. | |
1519 if (G1UseHRIntoRS) { | |
1520 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { | |
1521 _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
1522 } else { | |
1523 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); | |
1524 return JNI_ENOMEM; | |
1525 } | |
1526 } else { | |
1527 _g1_rem_set = new StupidG1RemSet(this); | |
1528 } | |
1529 | |
1530 // Carve out the G1 part of the heap. | |
1531 | |
1532 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1533 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1534 g1_rs.size()/HeapWordSize); | |
1535 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1536 | |
1537 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1538 | |
1539 _g1_storage.initialize(g1_rs, 0); | |
1540 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1541 _g1_max_committed = _g1_committed; | |
393 | 1542 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1543 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1544 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
1545 | |
807 | 1546 // 6843694 - ensure that the maximum region index can fit |
1547 // in the remembered set structures. | |
1548 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; | |
1549 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); | |
1550 | |
1551 const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift; | |
1552 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; | |
1553 guarantee(cards_per_region < max_cards_per_region, "too many cards per region"); | |
1554 | |
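// [Editor's note] A worked instance of the two guarantees above, assuming
// RegionIdx_t and CardIdx_t are 16-bit (short) types:
// max_region_idx = (1 << 15) - 1 = 32767, and with 1M regions and
// 512-byte cards (card_shift = 9), cards_per_region = 1M >> 9 = 2048,
// comfortably below max_cards_per_region = 32767.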
342 | 1555 _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
1556 heap_word_size(init_byte_size)); | |
1557 | |
1558 _g1h = this; | |
1559 | |
1560 // Create the ConcurrentMark data structure and thread. | |
1561 // (Must do this late, so that "max_regions" is defined.) | |
1562 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1563 _cmThread = _cm->cmThread(); | |
1564 | |
1565 // ...and the concurrent zero-fill thread, if necessary. | |
1566 if (G1ConcZeroFill) { | |
1567 _czft = new ConcurrentZFThread(); | |
1568 } | |
1569 | |
1570 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1571 HeapRegionRemSet::init_heap(max_regions()); | |
1572 | |
677 | 1573 // Now expand into the initial heap size. |
1574 expand(init_byte_size); | |
342 | 1575 |
1576 // Perform any initialization actions delegated to the policy. | |
1577 g1_policy()->init(); | |
1578 | |
1579 g1_policy()->note_start_of_mark_thread(); | |
1580 | |
1581 _refine_cte_cl = | |
1582 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1583 g1_rem_set(), | |
1584 concurrent_g1_refine()); | |
1585 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1586 | |
1587 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1588 SATB_Q_FL_lock, | |
1589 0, | |
1590 Shared_SATB_Q_lock); | |
794 | 1591 |
1592 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1593 DirtyCardQ_FL_lock, | |
883 | 1594 G1UpdateBufferQueueMaxLength, |
794 | 1595 Shared_DirtyCardQ_lock); |
1596 | |
616 | 1597 if (G1DeferredRSUpdate) { |
1598 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1599 DirtyCardQ_FL_lock, | |
1600 0, | |
1601 Shared_DirtyCardQ_lock, | |
1602 &JavaThread::dirty_card_queue_set()); | |
1603 } | |
342 | 1604 // In case we're keeping closure specialization stats, initialize those |
1605 // counts and that mechanism. | |
1606 SpecializationStats::clear(); | |
1607 | |
1608 _gc_alloc_region_list = NULL; | |
1609 | |
1610 // Do later initialization work for concurrent refinement. | |
1611 _cg1r->init(); | |
1612 | |
1613 const char* group_names[] = { "CR", "ZF", "CM", "CL" }; | |
1614 GCOverheadReporter::initGCOverheadReporter(4, group_names); | |
1615 | |
1616 return JNI_OK; | |
1617 } | |
1618 | |
1619 void G1CollectedHeap::ref_processing_init() { | |
1620 SharedHeap::ref_processing_init(); | |
1621 MemRegion mr = reserved_region(); | |
1622 _ref_processor = ReferenceProcessor::create_ref_processor( | |
1623 mr, // span | |
1624 false, // Reference discovery is not atomic | |
1625 // (though it shouldn't matter here.) | |
1626 true, // mt_discovery | |
1627 NULL, // is alive closure: need to fill this in for efficiency | |
1628 ParallelGCThreads, | |
1629 ParallelRefProcEnabled, | |
1630 true); // Setting next fields of discovered | |
1631 // lists requires a barrier. | |
1632 } | |
1633 | |
1634 size_t G1CollectedHeap::capacity() const { | |
1635 return _g1_committed.byte_size(); | |
1636 } | |
1637 | |
1638 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent, | |
1639 int worker_i) { | |
889 | 1640 // Clean cards in the hot card cache |
1641 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set()); | |
1642 | |
342 | 1643 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
1644 int n_completed_buffers = 0; | |
1645 while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) { | |
1646 n_completed_buffers++; | |
1647 } | |
1648 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
1649 (double) n_completed_buffers); | |
1650 dcqs.clear_n_completed_buffers(); | |
1651 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
1652 } | |
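// [Editor's note] In the loop above, apply_closure_to_completed_buffer()
// returns true each time a completed buffer is claimed and processed, so
// n_completed_buffers ends up as the number of buffers this worker drained,
// which is what gets recorded in the policy's update-RS statistics.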
1653 | |
1654 | |
1655 // Computes the sum of the storage used by the various regions. | |
1656 | |
1657 size_t G1CollectedHeap::used() const { | |
862 | 1658 assert(Heap_lock->owner() != NULL, |
1659 "Should be owned on this thread's behalf."); | |
342 | 1660 size_t result = _summary_bytes_used; |
845 | 1661 // Read only once in case it is set to NULL concurrently |
1662 HeapRegion* hr = _cur_alloc_region; | |
1663 if (hr != NULL) | |
1664 result += hr->used(); | |
342 | 1665 return result; |
1666 } | |
1667 | |
846 | 1668 size_t G1CollectedHeap::used_unlocked() const { |
1669 size_t result = _summary_bytes_used; | |
1670 return result; | |
1671 } | |
1672 | |
342 | 1673 class SumUsedClosure: public HeapRegionClosure { |
1674 size_t _used; | |
1675 public: | |
1676 SumUsedClosure() : _used(0) {} | |
1677 bool doHeapRegion(HeapRegion* r) { | |
1678 if (!r->continuesHumongous()) { | |
1679 _used += r->used(); | |
1680 } | |
1681 return false; | |
1682 } | |
1683 size_t result() { return _used; } | |
1684 }; | |
1685 | |
1686 size_t G1CollectedHeap::recalculate_used() const { | |
1687 SumUsedClosure blk; | |
1688 _hrs->iterate(&blk); | |
1689 return blk.result(); | |
1690 } | |
1691 | |
1692 #ifndef PRODUCT | |
1693 class SumUsedRegionsClosure: public HeapRegionClosure { | |
1694 size_t _num; | |
1695 public: | |
677 | 1696 SumUsedRegionsClosure() : _num(0) {} |
342 | 1697 bool doHeapRegion(HeapRegion* r) { |
1698 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
1699 _num += 1; | |
1700 } | |
1701 return false; | |
1702 } | |
1703 size_t result() { return _num; } | |
1704 }; | |
1705 | |
1706 size_t G1CollectedHeap::recalculate_used_regions() const { | |
1707 SumUsedRegionsClosure blk; | |
1708 _hrs->iterate(&blk); | |
1709 return blk.result(); | |
1710 } | |
1711 #endif // PRODUCT | |
1712 | |
1713 size_t G1CollectedHeap::unsafe_max_alloc() { | |
1714 if (_free_regions > 0) return HeapRegion::GrainBytes; | |
1715 // otherwise, is there space in the current allocation region? | |
1716 | |
1717 // We need to store the current allocation region in a local variable | |
1718 // here. The problem is that this method doesn't take any locks and | |
1719 // there may be other threads which overwrite the current allocation | |
1720 // region field. attempt_allocation(), for example, sets it to NULL | |
1721 // and this can happen *after* the NULL check here but before the call | |
1722 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
1723 // to be a problem in the optimized build, since the two loads of the | |
1724 // current allocation region field are optimized away. | |
1725 HeapRegion* car = _cur_alloc_region; | |
1726 | |
1727 // FIXME: should iterate over all regions? | |
1728 if (car == NULL) { | |
1729 return 0; | |
1730 } | |
1731 return car->free(); | |
1732 } | |
1733 | |
1734 void G1CollectedHeap::collect(GCCause::Cause cause) { | |
1735 // The caller doesn't have the Heap_lock | |
1736 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); | |
1737 MutexLocker ml(Heap_lock); | |
1738 collect_locked(cause); | |
1739 } | |
1740 | |
1741 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { | |
1742 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
1743 assert(Heap_lock->is_locked(), "Precondition#2"); | |
1744 GCCauseSetter gcs(this, cause); | |
1745 switch (cause) { | |
1746 case GCCause::_heap_inspection: | |
1747 case GCCause::_heap_dump: { | |
1748 HandleMark hm; | |
1749 do_full_collection(false); // don't clear all soft refs | |
1750 break; | |
1751 } | |
1752 default: // XXX FIX ME | |
1753 ShouldNotReachHere(); // Unexpected use of this function | |
1754 } | |
1755 } | |
1756 | |
1757 | |
1758 void G1CollectedHeap::collect_locked(GCCause::Cause cause) { | |
1759 // Don't want to do a GC until cleanup is completed. | |
1760 wait_for_cleanup_complete(); | |
1761 | |
1762 // Read the GC count while holding the Heap_lock | |
1763 int gc_count_before = SharedHeap::heap()->total_collections(); | |
1764 { | |
1765 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
1766 VM_G1CollectFull op(gc_count_before, cause); | |
1767 VMThread::execute(&op); | |
1768 } | |
1769 } | |
1770 | |
1771 bool G1CollectedHeap::is_in(const void* p) const { | |
1772 if (_g1_committed.contains(p)) { | |
1773 HeapRegion* hr = _hrs->addr_to_region(p); | |
1774 return hr->is_in(p); | |
1775 } else { | |
1776 return _perm_gen->as_gen()->is_in(p); | |
1777 } | |
1778 } | |
1779 | |
1780 // Iteration functions. | |
1781 | |
1782 // Iterates an OopClosure over all ref-containing fields of objects | |
1783 // within a HeapRegion. | |
1784 | |
1785 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
1786 MemRegion _mr; | |
1787 OopClosure* _cl; | |
1788 public: | |
1789 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
1790 : _mr(mr), _cl(cl) {} | |
1791 bool doHeapRegion(HeapRegion* r) { | |
1792 if (! r->continuesHumongous()) { | |
1793 r->oop_iterate(_cl); | |
1794 } | |
1795 return false; | |
1796 } | |
1797 }; | |
1798 | |
678 | 1799 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
342 | 1800 IterateOopClosureRegionClosure blk(_g1_committed, cl); |
1801 _hrs->iterate(&blk); | |
678 | 1802 if (do_perm) { |
1803 perm_gen()->oop_iterate(cl); | |
1804 } | |
342 | 1805 } |
1806 | |
678 | 1807 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
342 | 1808 IterateOopClosureRegionClosure blk(mr, cl); |
1809 _hrs->iterate(&blk); | |
678 | 1810 if (do_perm) { |
1811 perm_gen()->oop_iterate(cl); | |
1812 } | |
342 | 1813 } |
1814 | |
1815 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
1816 | |
1817 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
1818 ObjectClosure* _cl; | |
1819 public: | |
1820 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
1821 bool doHeapRegion(HeapRegion* r) { | |
1822 if (! r->continuesHumongous()) { | |
1823 r->object_iterate(_cl); | |
1824 } | |
1825 return false; | |
1826 } | |
1827 }; | |
1828 | |
678 | 1829 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
342 | 1830 IterateObjectClosureRegionClosure blk(cl); |
1831 _hrs->iterate(&blk); | |
678 | 1832 if (do_perm) { |
1833 perm_gen()->object_iterate(cl); | |
1834 } | |
342 | 1835 } |
1836 | |
1837 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
1838 // FIXME: is this right? | |
1839 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
1840 } | |
1841 | |
1842 // Calls a SpaceClosure on a HeapRegion. | |
1843 | |
1844 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
1845 SpaceClosure* _cl; | |
1846 public: | |
1847 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
1848 bool doHeapRegion(HeapRegion* r) { | |
1849 _cl->do_space(r); | |
1850 return false; | |
1851 } | |
1852 }; | |
1853 | |
1854 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
1855 SpaceClosureRegionClosure blk(cl); | |
1856 _hrs->iterate(&blk); | |
1857 } | |
1858 | |
1859 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { | |
1860 _hrs->iterate(cl); | |
1861 } | |
1862 | |
1863 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
1864 HeapRegionClosure* cl) { | |
1865 _hrs->iterate_from(r, cl); | |
1866 } | |
1867 | |
1868 void | |
1869 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { | |
1870 _hrs->iterate_from(idx, cl); | |
1871 } | |
1872 | |
1873 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } | |
1874 | |
1875 void | |
1876 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
1877 int worker, | |
1878 jint claim_value) { | |
355 | 1879 const size_t regions = n_regions(); |
1880 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); | |
1881 // try to spread out the starting points of the workers | |
1882 const size_t start_index = regions / worker_num * (size_t) worker; | |
1883 | |
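// [Editor's note] For instance, with regions = 100 and worker_num = 4,
// workers 0..3 start at indices 0, 25, 50 and 75; the modulo walk below
// makes each worker examine all 100 regions, while claiming ensures each
// region is processed by exactly one of them.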
1884 // each worker will actually look at all regions | |
1885 for (size_t count = 0; count < regions; ++count) { | |
1886 const size_t index = (start_index + count) % regions; | |
1887 assert(0 <= index && index < regions, "sanity"); | |
1888 HeapRegion* r = region_at(index); | |
1889 // we'll ignore "continues humongous" regions (we'll process them | |
1890 // when we come across their corresponding "start humongous" | |
1891 // region) and regions already claimed | |
1892 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
1893 continue; | |
1894 } | |
1895 // OK, try to claim it | |
342 | 1896 if (r->claimHeapRegion(claim_value)) { |
355 | 1897 // success! |
1898 assert(!r->continuesHumongous(), "sanity"); | |
1899 if (r->startsHumongous()) { | |
1900 // If the region is "starts humongous" we'll iterate over its | |
1901 // "continues humongous" first; in fact we'll do them | |
1902 // first. The order is important. In one case, calling the | |
1903 // closure on the "starts humongous" region might de-allocate | |
1904 // and clear all its "continues humongous" regions and, as a | |
1905 // result, we might end up processing them twice. So, we'll do | |
1906 // them first (notice: most closures will ignore them anyway) and | |
1907 // then we'll do the "starts humongous" region. | |
1908 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
1909 HeapRegion* chr = region_at(ch_index); | |
1910 | |
1911 // if the region has already been claimed or it's not | |
1912 // "continues humongous" we're done | |
1913 if (chr->claim_value() == claim_value || | |
1914 !chr->continuesHumongous()) { | |
1915 break; | |
1916 } | |
1917 | |
1918 // No one should have claimed it directly, given | |
1919 // that we claimed its "starts humongous" region. | |
1920 assert(chr->claim_value() != claim_value, "sanity"); | |
1921 assert(chr->humongous_start_region() == r, "sanity"); | |
1922 | |
1923 if (chr->claimHeapRegion(claim_value)) { | |
1924 // we should always be able to claim it; no one else should | |
1925 // be trying to claim this region | |
1926 | |
1927 bool res2 = cl->doHeapRegion(chr); | |
1928 assert(!res2, "Should not abort"); | |
1929 | |
1930 // Right now, this holds (i.e., no closure that actually | |
1931 // does something with "continues humongous" regions | |
1932 // clears them). We might have to weaken it in the future, | |
1933 // but let's leave these two asserts here for extra safety. | |
1934 assert(chr->continuesHumongous(), "should still be the case"); | |
1935 assert(chr->humongous_start_region() == r, "sanity"); | |
1936 } else { | |
1937 guarantee(false, "we should not reach here"); | |
1938 } | |
1939 } | |
1940 } | |
1941 | |
1942 assert(!r->continuesHumongous(), "sanity"); | |
1943 bool res = cl->doHeapRegion(r); | |
1944 assert(!res, "Should not abort"); | |
1945 } | |
1946 } | |
1947 } | |
1948 | |
390 | 1949 class ResetClaimValuesClosure: public HeapRegionClosure { |
1950 public: | |
1951 bool doHeapRegion(HeapRegion* r) { | |
1952 r->set_claim_value(HeapRegion::InitialClaimValue); | |
1953 return false; | |
1954 } | |
1955 }; | |
1956 | |
1957 void | |
1958 G1CollectedHeap::reset_heap_region_claim_values() { | |
1959 ResetClaimValuesClosure blk; | |
1960 heap_region_iterate(&blk); | |
1961 } | |
1962 | |
355 | 1963 #ifdef ASSERT |
1964 // This checks whether all regions in the heap have the correct claim | |
1965 // value. I also piggy-backed on this a check to ensure that the | |
1966 // humongous_start_region() information on "continues humongous" | |
1967 // regions is correct. | |
1968 | |
1969 class CheckClaimValuesClosure : public HeapRegionClosure { | |
1970 private: | |
1971 jint _claim_value; | |
1972 size_t _failures; | |
1973 HeapRegion* _sh_region; | |
1974 public: | |
1975 CheckClaimValuesClosure(jint claim_value) : | |
1976 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } | |
1977 bool doHeapRegion(HeapRegion* r) { | |
1978 if (r->claim_value() != _claim_value) { | |
1979 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
1980 "claim value = %d, should be %d", | |
1981 r->bottom(), r->end(), r->claim_value(), | |
1982 _claim_value); | |
1983 ++_failures; | |
1984 } | |
1985 if (!r->isHumongous()) { | |
1986 _sh_region = NULL; | |
1987 } else if (r->startsHumongous()) { | |
1988 _sh_region = r; | |
1989 } else if (r->continuesHumongous()) { | |
1990 if (r->humongous_start_region() != _sh_region) { | |
1991 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
1992 "HS = "PTR_FORMAT", should be "PTR_FORMAT, | |
1993 r->bottom(), r->end(), | |
1994 r->humongous_start_region(), | |
1995 _sh_region); | |
1996 ++_failures; | |
342 | 1997 } |
1998 } | |
355 | 1999 return false; |
2000 } | |
2001 size_t failures() { | |
2002 return _failures; | |
2003 } | |
2004 }; | |
2005 | |
2006 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
2007 CheckClaimValuesClosure cl(claim_value); | |
2008 heap_region_iterate(&cl); | |
2009 return cl.failures() == 0; | |
2010 } | |
2011 #endif // ASSERT | |
342 | 2012 |
2013 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
2014 HeapRegion* r = g1_policy()->collection_set(); | |
2015 while (r != NULL) { | |
2016 HeapRegion* next = r->next_in_collection_set(); | |
2017 if (cl->doHeapRegion(r)) { | |
2018 cl->incomplete(); | |
2019 return; | |
2020 } | |
2021 r = next; | |
2022 } | |
2023 } | |
2024 | |
2025 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
2026 HeapRegionClosure *cl) { | |
2027 assert(r->in_collection_set(), | |
2028 "Start region must be a member of the collection set."); | |
2029 HeapRegion* cur = r; | |
2030 while (cur != NULL) { | |
2031 HeapRegion* next = cur->next_in_collection_set(); | |
2032 if (cl->doHeapRegion(cur) && false) { | |
2033 cl->incomplete(); | |
2034 return; | |
2035 } | |
2036 cur = next; | |
2037 } | |
2038 cur = g1_policy()->collection_set(); | |
2039 while (cur != r) { | |
2040 HeapRegion* next = cur->next_in_collection_set(); | |
2041 if (cl->doHeapRegion(cur) && false) { | |
2042 cl->incomplete(); | |
2043 return; | |
2044 } | |
2045 cur = next; | |
2046 } | |
2047 } | |
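// [Editor's note] A sketch of the wrap-around traversal above: for a
// collection set A -> B -> C -> D and start region C, the first loop visits
// C and D, and the second loop resumes at the head A and stops before C,
// so every region in the set is visited exactly once.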
2048 | |
2049 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
2050 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
2051 } | |
2052 | |
2053 | |
2054 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
2055 Space* res = heap_region_containing(addr); | |
2056 if (res == NULL) | |
2057 res = perm_gen()->space_containing(addr); | |
2058 return res; | |
2059 } | |
2060 | |
2061 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
2062 Space* sp = space_containing(addr); | |
2063 if (sp != NULL) { | |
2064 return sp->block_start(addr); | |
2065 } | |
2066 return NULL; | |
2067 } | |
2068 | |
2069 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
2070 Space* sp = space_containing(addr); | |
2071 assert(sp != NULL, "block_size of address outside of heap"); | |
2072 return sp->block_size(addr); | |
2073 } | |
2074 | |
2075 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
2076 Space* sp = space_containing(addr); | |
2077 return sp->block_is_obj(addr); | |
2078 } | |
2079 | |
2080 bool G1CollectedHeap::supports_tlab_allocation() const { | |
2081 return true; | |
2082 } | |
2083 | |
2084 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
2085 return HeapRegion::GrainBytes; | |
2086 } | |
2087 | |
2088 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
2089 // Return the remaining space in the cur alloc region, but not less than | |
2090 // the min TLAB size. | |
2091 // Also, no more than half the region size, since we can't allow tlabs to | |
2092 // grow big enough to accommodate humongous objects. | |
2093 | |
2094 // We need to store it locally, since it might change between when we | |
2095 // test for NULL and when we use it later. | |
2096 ContiguousSpace* cur_alloc_space = _cur_alloc_region; | |
2097 if (cur_alloc_space == NULL) { | |
2098 return HeapRegion::GrainBytes/2; | |
2099 } else { | |
2100 return MAX2(MIN2(cur_alloc_space->free(), | |
2101 (size_t)(HeapRegion::GrainBytes/2)), | |
2102 (size_t)MinTLABSize); | |
2103 } | |
2104 } | |
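// [Editor's note] A worked instance of the clamping above, assuming
// GrainBytes = 1M and MinTLABSize = 2K: a current alloc region with 300K
// free yields MAX2(MIN2(300K, 512K), 2K) = 300K, while one with only 1K
// free yields the 2K minimum.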
2105 | |
2106 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { | |
2107 bool dummy; | |
2108 return G1CollectedHeap::mem_allocate(size, false, true, &dummy); | |
2109 } | |
2110 | |
2111 bool G1CollectedHeap::allocs_are_zero_filled() { | |
2112 return false; | |
2113 } | |
2114 | |
2115 size_t G1CollectedHeap::large_typearray_limit() { | |
2116 // FIXME | |
2117 return HeapRegion::GrainBytes/HeapWordSize; | |
2118 } | |
2119 | |
2120 size_t G1CollectedHeap::max_capacity() const { | |
2121 return _g1_committed.byte_size(); | |
2122 } | |
2123 | |
2124 jlong G1CollectedHeap::millis_since_last_gc() { | |
2125 // assert(false, "NYI"); | |
2126 return 0; | |
2127 } | |
2128 | |
2129 | |
2130 void G1CollectedHeap::prepare_for_verify() { | |
2131 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2132 ensure_parsability(false); | |
2133 } | |
2134 g1_rem_set()->prepare_for_verify(); | |
2135 } | |
2136 | |
2137 class VerifyLivenessOopClosure: public OopClosure { | |
2138 G1CollectedHeap* g1h; | |
2139 public: | |
2140 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
2141 g1h = _g1h; | |
2142 } | |
845 | 2143 void do_oop(narrowOop *p) { do_oop_work(p); } |
2144 void do_oop( oop *p) { do_oop_work(p); } | |
2145 | |
2146 template <class T> void do_oop_work(T *p) { | |
2147 oop obj = oopDesc::load_decode_heap_oop(p); | |
2148 guarantee(obj == NULL || !g1h->is_obj_dead(obj), | |
2149 "Dead object referenced by a not dead object"); | |
342 | 2150 } |
2151 }; | |
2152 | |
2153 class VerifyObjsInRegionClosure: public ObjectClosure { | |
811 | 2154 private: |
342 | 2155 G1CollectedHeap* _g1h; |
2156 size_t _live_bytes; | |
2157 HeapRegion *_hr; | |
811 | 2158 bool _use_prev_marking; |
342 | 2159 public: |
811 | 2160 // use_prev_marking == true -> use "prev" marking information, |
2161 // use_prev_marking == false -> use "next" marking information | |
2162 VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) | |
2163 : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { | |
342 | 2164 _g1h = G1CollectedHeap::heap(); |
2165 } | |
2166 void do_object(oop o) { | |
2167 VerifyLivenessOopClosure isLive(_g1h); | |
2168 assert(o != NULL, "Huh?"); | |
811 | 2169 if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { |
342 | 2170 o->oop_iterate(&isLive); |
2171 if (!_hr->obj_allocated_since_prev_marking(o)) | |
2172 _live_bytes += (o->size() * HeapWordSize); | |
2173 } | |
2174 } | |
2175 size_t live_bytes() { return _live_bytes; } | |
2176 }; | |
2177 | |
2178 class PrintObjsInRegionClosure : public ObjectClosure { | |
2179 HeapRegion *_hr; | |
2180 G1CollectedHeap *_g1; | |
2181 public: | |
2182 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2183 _g1 = G1CollectedHeap::heap(); | |
2184 }; | |
2185 | |
2186 void do_object(oop o) { | |
2187 if (o != NULL) { | |
2188 HeapWord *start = (HeapWord *) o; | |
2189 size_t word_sz = o->size(); | |
2190 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2191 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2192 (void*) o, word_sz, | |
2193 _g1->isMarkedPrev(o), | |
2194 _g1->isMarkedNext(o), | |
2195 _hr->obj_allocated_since_prev_marking(o)); | |
2196 HeapWord *end = start + word_sz; | |
2197 HeapWord *cur; | |
2198 int *val; | |
2199 for (cur = start; cur < end; cur++) { | |
2200 val = (int *) cur; | |
2201 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2202 } | |
2203 } | |
2204 } | |
2205 }; | |
2206 | |
2207 class VerifyRegionClosure: public HeapRegionClosure { | |
811 | 2208 private: |
342 | 2209 bool _allow_dirty; |
390 | 2210 bool _par; |
811 | 2211 bool _use_prev_marking; |
2212 public: | |
2213 // use_prev_marking == true -> use "prev" marking information, | |
2214 // use_prev_marking == false -> use "next" marking information | |
2215 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) | |
845 | 2216 : _allow_dirty(allow_dirty), |
2217 _par(par), | |
811 | 2218 _use_prev_marking(use_prev_marking) {} |
845 | 2219 |
342 | 2220 bool doHeapRegion(HeapRegion* r) { |
390 | 2221 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2222 "Should be unclaimed at verify points."); | |
637 | 2223 if (!r->continuesHumongous()) { |
811 | 2224 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); |
2225 r->verify(_allow_dirty, _use_prev_marking); | |
342 | 2226 r->object_iterate(¬_dead_yet_cl); |
2227 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(), | |
2228 "More live objects than counted in last complete marking."); | |
2229 } | |
2230 return false; | |
2231 } | |
2232 }; | |
2233 | |
2234 class VerifyRootsClosure: public OopsInGenClosure { | |
2235 private: | |
2236 G1CollectedHeap* _g1h; | |
2237 bool _failures; | |
811 | 2238 bool _use_prev_marking; |
342 | 2239 public: |
811 | 2240 // use_prev_marking == true -> use "prev" marking information, |
2241 // use_prev_marking == false -> use "next" marking information | |
2242 VerifyRootsClosure(bool use_prev_marking) : | |
845 | 2243 _g1h(G1CollectedHeap::heap()), |
2244 _failures(false), | |
811 | 2245 _use_prev_marking(use_prev_marking) { } |
342 | 2246 |
2247 bool failures() { return _failures; } | |
2248 | |
845 | 2249 template <class T> void do_oop_nv(T* p) { |
2250 T heap_oop = oopDesc::load_heap_oop(p); | |
2251 if (!oopDesc::is_null(heap_oop)) { | |
2252 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); | |
811 | 2253 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { |
342 | 2254 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
2255 "points to dead obj "PTR_FORMAT, p, (void*) obj); | |
2256 obj->print_on(gclog_or_tty); | |
2257 _failures = true; | |
2258 } | |
2259 } | |
2260 } | |
845 | 2261 |
2262 void do_oop(oop* p) { do_oop_nv(p); } | |
2263 void do_oop(narrowOop* p) { do_oop_nv(p); } | |
342 | 2264 }; |
2265 | |
390 | 2266 // This is the task used for parallel heap verification. |
2267 | |
2268 class G1ParVerifyTask: public AbstractGangTask { | |
2269 private: | |
2270 G1CollectedHeap* _g1h; | |
2271 bool _allow_dirty; | |
811 | 2272 bool _use_prev_marking; |
390 | 2273 |
2274 public: | |
811 | 2275 // use_prev_marking == true -> use "prev" marking information, |
2276 // use_prev_marking == false -> use "next" marking information | |
2277 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, | |
2278 bool use_prev_marking) : | |
390 | 2279 AbstractGangTask("Parallel verify task"), |
845 | 2280 _g1h(g1h), |
2281 _allow_dirty(allow_dirty), | |
811 | 2282 _use_prev_marking(use_prev_marking) { } |
390 | 2283 |
2284 void work(int worker_i) { | |
637 | 2285 HandleMark hm; |
811 | 2286 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); |
390 | 2287 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
2288 HeapRegion::ParVerifyClaimValue); | |
2289 } | |
2290 }; | |
2291 | |
342 | 2292 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
811 | 2293 verify(allow_dirty, silent, /* use_prev_marking */ true); |
2294 } | |
2295 | |
2296 void G1CollectedHeap::verify(bool allow_dirty, | |
2297 bool silent, | |
2298 bool use_prev_marking) { | |
342 | 2299 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
2300 if (!silent) { gclog_or_tty->print("roots "); } | |
811 | 2301 VerifyRootsClosure rootsCl(use_prev_marking); |
342 | 2302 process_strong_roots(false, |
2303 SharedHeap::SO_AllClasses, | |
2304 &rootsCl, | |
2305 &rootsCl); | |
2306 rem_set()->invalidate(perm_gen()->used_region(), false); | |
2307 if (!silent) { gclog_or_tty->print("heapRegions "); } | |
390 | 2308 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2309 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2310 "sanity check"); | |
2311 | |
811 | 2312 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
390 | 2313 int n_workers = workers()->total_workers(); |
2314 set_par_threads(n_workers); | |
2315 workers()->run_task(&task); | |
2316 set_par_threads(0); | |
2317 | |
2318 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2319 "sanity check"); | |
2320 | |
2321 reset_heap_region_claim_values(); | |
2322 | |
2323 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2324 "sanity check"); | |
2325 } else { | |
811 | 2326 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
390 | 2327 _hrs->iterate(&blk); |
2328 } | |
342 | 2329 if (!silent) gclog_or_tty->print("remset "); |
2330 rem_set()->verify(); | |
2331 guarantee(!rootsCl.failures(), "should not have had failures"); | |
2332 } else { | |
2333 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2334 } | |
2335 } | |
2336 | |
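The parallel branch above leans on a claim-value protocol: every region's claim word starts at InitialClaimValue, each verifier worker claims regions by compare-and-swap to ParVerifyClaimValue so no region is processed twice, and the claim words are reset afterwards so the next parallel task sees a clean slate (hence the three sanity-check asserts). A rough sketch of the protocol, with std::atomic standing in for HotSpot's Atomic::cmpxchg and plain ints for the claim values:

#include <atomic>
#include <cstddef>

enum { InitialClaimValue = 0, ParVerifyClaimValue = 7 };

struct Region {
  std::atomic<int> claim;
  Region() : claim(InitialClaimValue) {}
};

// Every worker sweeps the whole array, starting at a worker-specific offset
// to spread contention; the CAS guarantees each region is verified once.
void par_verify(Region* regions, size_t n, int worker_i, int n_workers) {
  size_t start = n * worker_i / n_workers;
  for (size_t k = 0; k < n; ++k) {
    Region& r = regions[(start + k) % n];
    int expected = InitialClaimValue;
    if (r.claim.compare_exchange_strong(expected, ParVerifyClaimValue)) {
      // ... verify this region, exactly once across all workers ...
    }
  }
  // Afterwards the caller checks every claim == ParVerifyClaimValue and
  // resets them all to InitialClaimValue, mirroring the asserts above.
}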
2337 class PrintRegionClosure: public HeapRegionClosure { | |
2338 outputStream* _st; | |
2339 public: | |
2340 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2341 bool doHeapRegion(HeapRegion* r) { | |
2342 r->print_on(_st); | |
2343 return false; | |
2344 } | |
2345 }; | |
2346 | |
838 | 2347 void G1CollectedHeap::print() const { print_on(tty); } |
342 | 2348 |
2349 void G1CollectedHeap::print_on(outputStream* st) const { | |
838 | 2350 print_on(st, PrintHeapAtGCExtended); |
2351 } |
2352 |
2353 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
2354 st->print(" %-20s", "garbage-first heap"); |
2355 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
846 | 2356 capacity()/K, used_unlocked()/K); |
838 | 2357 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
2358 _g1_storage.low_boundary(), |
2359 _g1_storage.high(), |
2360 _g1_storage.high_boundary()); |
2361 st->cr(); |
2362 st->print(" region size " SIZE_FORMAT "K, ", |
2363 HeapRegion::GrainBytes/K); |
2364 size_t young_regions = _young_list->length(); |
2365 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
2366 young_regions, young_regions * HeapRegion::GrainBytes / K); |
2367 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
2368 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
2369 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
2370 st->cr(); |
2371 perm()->as_gen()->print_on(st); |
2372 if (extended) { |
2373 print_on_extended(st); |
2374 } |
2375 } |
2376 |
2377 void G1CollectedHeap::print_on_extended(outputStream* st) const { |
342 | 2378 PrintRegionClosure blk(st); |
2379 _hrs->iterate(&blk); | |
2380 } | |
2381 | |
794 | 2382 class PrintOnThreadsClosure : public ThreadClosure { |
2383 outputStream* _st; | |
2384 public: | |
2385 PrintOnThreadsClosure(outputStream* st) : _st(st) { } | |
2386 virtual void do_thread(Thread *t) { | |
2387 t->print_on(_st); | |
2388 } | |
2389 }; | |
2390 | |
342 | 2391 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { |
2392 if (ParallelGCThreads > 0) { | |
2393 workers()->print_worker_threads(); | |
2394 } | |
2395 st->print("\"G1 concurrent mark GC Thread\" "); | |
2396 _cmThread->print(); | |
2397 st->cr(); | |
794 | 2398 st->print("\"G1 concurrent refinement GC Threads\" "); |
2399 PrintOnThreadsClosure p(st); | |
2400 _cg1r->threads_do(&p); | |
342 | 2401 st->cr(); |
2402 st->print("\"G1 zero-fill GC Thread\" "); | |
2403 _czft->print_on(st); | |
2404 st->cr(); | |
2405 } | |
2406 | |
2407 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
2408 if (ParallelGCThreads > 0) { | |
2409 workers()->threads_do(tc); | |
2410 } | |
2411 tc->do_thread(_cmThread); | |
794 | 2412 _cg1r->threads_do(tc); |
342 | 2413 tc->do_thread(_czft); |
2414 } | |
2415 | |
2416 void G1CollectedHeap::print_tracing_info() const { | |
2417 concurrent_g1_refine()->print_final_card_counts(); | |
2418 | |
2419 // We'll overload this to mean "trace GC pause statistics." | |
2420 if (TraceGen0Time || TraceGen1Time) { | |
2421 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2422 // to that. | |
2423 g1_policy()->print_tracing_info(); | |
2424 } | |
751 | 2425 if (G1SummarizeRSetStats) { |
342 | 2426 g1_rem_set()->print_summary_info(); |
2427 } | |
751 | 2428 if (G1SummarizeConcurrentMark) { |
342 | 2429 concurrent_mark()->print_summary_info(); |
2430 } | |
751 | 2431 if (G1SummarizeZFStats) { |
342 | 2432 ConcurrentZFThread::print_summary_info(); |
2433 } | |
2434 g1_policy()->print_yg_surv_rate_info(); | |
2435 | |
2436 GCOverheadReporter::printGCOverhead(); | |
2437 | |
2438 SpecializationStats::print(); | |
2439 } | |
2440 | |
2441 | |
2442 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2443 HeapRegion* hr = heap_region_containing(addr); | |
2444 if (hr == NULL) { | |
2445 return 0; | |
2446 } else { | |
2447 return 1; | |
2448 } | |
2449 } | |
2450 | |
2451 G1CollectedHeap* G1CollectedHeap::heap() { | |
2452 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
2453 "not a garbage-first heap"); | |
2454 return _g1h; | |
2455 } | |
2456 | |
2457 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
2458 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); | |
2459 // Call allocation profiler | |
2460 AllocationProfiler::iterate_since_last_gc(); | |
2461 // Fill TLAB's and such | |
2462 ensure_parsability(true); | |
2463 } | |
2464 | |
2465 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
2466 // FIXME: what is this about? | |
2467 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
2468 // is set. | |
2469 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
2470 "derived pointer present")); | |
2471 } | |
2472 | |
2473 void G1CollectedHeap::do_collection_pause() { | |
2474 // Read the GC count while holding the Heap_lock | |
2475 // we need to do this _before_ wait_for_cleanup_complete(), to | |
2476 // ensure that we do not give up the heap lock and potentially | |
2477 // pick up the wrong count | |
2478 int gc_count_before = SharedHeap::heap()->total_collections(); | |
2479 | |
2480 // Don't want to do a GC pause while cleanup is being completed! | |
2481 wait_for_cleanup_complete(); | |
2482 | |
2483 g1_policy()->record_stop_world_start(); | |
2484 { | |
2485 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
2486 VM_G1IncCollectionPause op(gc_count_before); | |
2487 VMThread::execute(&op); | |
2488 } | |
2489 } | |
2490 | |
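Note the ordering in do_collection_pause(): total_collections() is sampled while the Heap_lock is still held, and the sampled value rides along in the VM operation so the VM thread can recognize that some other thread's pause already ran after the lock was given up, and skip the redundant pause. A stripped-down sketch of the pattern, with std::mutex standing in for Heap_lock and all names illustrative:

#include <mutex>

static std::mutex heap_lock;     // stand-in for Heap_lock
static int gc_count = 0;         // stand-in for total_collections()

// Runs with all mutators stopped (the "VM operation"), much simplified.
static void pause_at_safepoint(int gc_count_before) {
  if (gc_count != gc_count_before) {
    return;                      // another thread's pause already ran
  }
  ++gc_count;
  // ... do the evacuation pause ...
}

void request_collection_pause() {
  int gc_count_before;
  {
    std::lock_guard<std::mutex> hl(heap_lock);
    gc_count_before = gc_count;  // sampled under the lock, as above
  }                              // lock released, like the MutexUnlocker
  pause_at_safepoint(gc_count_before);
}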
2491 void | |
2492 G1CollectedHeap::doConcurrentMark() { | |
845 | 2493 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
2494 if (!_cmThread->in_progress()) { |
2495 _cmThread->set_started(); |
2496 CGC_lock->notify(); |
342 | 2497 } |
2498 } | |
2499 | |
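doConcurrentMark() above is the classic monitor handshake: the started flag is set and the notify issued while holding CGC_lock, and the marking thread tests that same flag under the same lock before sleeping, so the wakeup can never slip between its test and its wait. A self-contained sketch, with std::condition_variable standing in for HotSpot's Monitor:

#include <condition_variable>
#include <mutex>

static std::mutex cgc_lock;                // stand-in for CGC_lock
static std::condition_variable cgc_cv;
static bool cm_started = false;

void signal_concurrent_mark() {            // VM thread side
  std::lock_guard<std::mutex> x(cgc_lock);
  if (!cm_started) {
    cm_started = true;
    cgc_cv.notify_one();
  }
}

void concurrent_mark_wait() {              // marking thread side
  std::unique_lock<std::mutex> x(cgc_lock);
  while (!cm_started) {                    // flag re-tested under the lock,
    cgc_cv.wait(x);                        // so a notify is never missed
  }
  cm_started = false;
  // ... run one marking cycle ...
}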
2500 class VerifyMarkedObjsClosure: public ObjectClosure { | |
2501 G1CollectedHeap* _g1h; | |
2502 public: | |
2503 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
2504 void do_object(oop obj) { | |
2505 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
2506 "markandsweep mark should agree with concurrent deadness"); | |
2507 } | |
2508 }; | |
2509 | |
2510 void | |
2511 G1CollectedHeap::checkConcurrentMark() { | |
2512 VerifyMarkedObjsClosure verifycl(this); | |
2513 // MutexLockerEx x(getMarkBitMapLock(), | |
2514 // Mutex::_no_safepoint_check_flag); | |
678 | 2515 object_iterate(&verifycl, false); |
342 | 2516 } |
2517 | |
2518 void G1CollectedHeap::do_sync_mark() { | |
2519 _cm->checkpointRootsInitial(); | |
2520 _cm->markFromRoots(); | |
2521 _cm->checkpointRootsFinal(false); | |
2522 } | |
2523 | |
2524 // <NEW PREDICTION> | |
2525 | |
2526 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
2527 bool young) { | |
2528 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
2529 } | |
2530 | |
2531 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
2532 predicted_time_ms) { | |
2533 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
2534 } | |
2535 | |
2536 size_t G1CollectedHeap::pending_card_num() { | |
2537 size_t extra_cards = 0; | |
2538 JavaThread *curr = Threads::first(); | |
2539 while (curr != NULL) { | |
2540 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
2541 extra_cards += dcq.size(); | |
2542 curr = curr->next(); | |
2543 } | |
2544 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2545 size_t buffer_size = dcqs.buffer_size(); | |
2546 size_t buffer_num = dcqs.completed_buffers_num(); | |
2547 return buffer_size * buffer_num + extra_cards; | |
2548 } | |
2549 | |
2550 size_t G1CollectedHeap::max_pending_card_num() { | |
2551 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2552 size_t buffer_size = dcqs.buffer_size(); | |
2553 size_t buffer_num = dcqs.completed_buffers_num(); | |
2554 int thread_num = Threads::number_of_threads(); | |
2555 return (buffer_num + thread_num) * buffer_size; | |
2556 } | |
2557 | |
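pending_card_num() above totals the work already queued: each completed buffer contributes buffer_size cards, plus whatever each mutator thread is holding in its partially filled private buffer; max_pending_card_num() instead rounds every thread's private buffer up to a full one, giving an upper bound. A worked example with made-up numbers (256-card buffers are an assumption for illustration, not a stated default):

// buffer_size = 256, completed_buffers_num = 10, three mutator threads
// holding 17, 40 and 3 cards in their private dirty-card queues:
//
//   pending_card_num()     = 256 * 10 + (17 + 40 + 3) = 2620
//   max_pending_card_num() = (10 + 3) * 256           = 3328   (>= 2620)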
2558 size_t G1CollectedHeap::cards_scanned() { | |
2559 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); | |
2560 return g1_rset->cardsScanned(); | |
2561 } | |
2562 | |
2563 void | |
2564 G1CollectedHeap::setup_surviving_young_words() { | |
2565 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
2566 size_t array_length = g1_policy()->young_cset_length(); | |
2567 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
2568 if (_surviving_young_words == NULL) { | |
2569 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
2570 "Not enough space for young surv words summary."); | |
2571 } | |
2572 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
845 | 2573 #ifdef ASSERT |
342 | 2574 for (size_t i = 0; i < array_length; ++i) { |
845 | 2575 assert( _surviving_young_words[i] == 0, "memset above" ); |
2576 } |
2577 #endif // ASSERT |
342 | 2578 } |
2579 | |
2580 void | |
2581 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
2582 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2583 size_t array_length = g1_policy()->young_cset_length(); | |
2584 for (size_t i = 0; i < array_length; ++i) | |
2585 _surviving_young_words[i] += surv_young_words[i]; | |
2586 } | |
2587 | |
2588 void | |
2589 G1CollectedHeap::cleanup_surviving_young_words() { | |
2590 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
2591 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
2592 _surviving_young_words = NULL; | |
2593 } | |
2594 | |
2595 // </NEW PREDICTION> | |
2596 | |
2597 void | |
677 | 2598 G1CollectedHeap::do_collection_pause_at_safepoint() { |
838 | 2599 if (PrintHeapAtGC) { |
2600 Universe::print_heap_before_gc(); |
2601 } |
2602 |
2603 { |
2604 char verbose_str[128]; |
2605 sprintf(verbose_str, "GC pause "); |
2606 if (g1_policy()->in_young_gc_mode()) { |
2607 if (g1_policy()->full_young_gcs()) |
2608 strcat(verbose_str, "(young)"); |
2609 else |
2610 strcat(verbose_str, "(partial)"); |
2611 } |
2612 if (g1_policy()->should_initiate_conc_mark()) |
2613 strcat(verbose_str, " (initial-mark)"); |
2614 |
2615 GCCauseSetter x(this, GCCause::_g1_inc_collection_pause); |
2616 |
2617 // if PrintGCDetails is on, we'll print long statistics information |
2618 // in the collector policy code, so let's not print this as the output |
2619 // is messy if we do. |
2620 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
2621 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
2622 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
2623 |
2624 ResourceMark rm; |
2625 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
2626 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
2627 guarantee(!is_gc_active(), "collection is not reentrant"); |
2628 assert(regions_accounted_for(), "Region leakage!"); |
2629 |
2630 increment_gc_time_stamp(); |
2631 |
2632 if (g1_policy()->in_young_gc_mode()) { |
2633 assert(check_young_list_well_formed(), |
2634 "young list should be well formed"); |
2635 } |
2636 |
2637 if (GC_locker::is_active()) { |
2638 return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
2639 } |
2640 |
2641 bool abandoned = false; |
2642 { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
2643 IsGCActiveMark x; |
2644 |
2645 gc_prologue(false); |
2646 increment_total_collections(false /* full gc */); |
342 | 2647 |
2648 #if G1_REM_SET_LOGGING | |
838 | 2649 gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
2650 print(); |
342 | 2651 #endif |
2652 | |
838 | 2653 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
2654 HandleMark hm; // Discard invalid handles created during verification |
2655 prepare_for_verify(); |
2656 gclog_or_tty->print(" VerifyBeforeGC:"); |
2657 Universe::verify(false); |
2658 } |
2659 |
2660 COMPILER2_PRESENT(DerivedPointerTable::clear()); |
2661 |
2662 // We want to turn off ref discovery, if necessary, and turn it back |
845 | 2663 // on again later if we do. XXX Dubious: why is discovery disabled? |
838 | 2664 bool was_enabled = ref_processor()->discovery_enabled(); |
2665 if (was_enabled) ref_processor()->disable_discovery(); |
2666 |
2667 // Forget the current alloc region (we might even choose it to be part |
2668 // of the collection set!). |
2669 abandon_cur_alloc_region(); |
2670 |
2671 // The elapsed time induced by the start time below deliberately elides |
2672 // the possible verification above. |
2673 double start_time_sec = os::elapsedTime(); |
2674 GCOverheadReporter::recordSTWStart(start_time_sec); |
2675 size_t start_used_bytes = used(); |
2676 |
2677 g1_policy()->record_collection_pause_start(start_time_sec, |
2678 start_used_bytes); |
2679 |
2680 guarantee(_in_cset_fast_test == NULL, "invariant"); |
2681 guarantee(_in_cset_fast_test_base == NULL, "invariant"); |
2682 _in_cset_fast_test_length = max_regions(); |
2683 _in_cset_fast_test_base = |
526 | 2684 NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
838 | 2685 memset(_in_cset_fast_test_base, false, |
526 | 2686 _in_cset_fast_test_length * sizeof(bool)); |
838 | 2687 // We're biasing _in_cset_fast_test to avoid subtracting the |
2688 // beginning of the heap every time we want to index; basically |
2689 // it's the same with what we do with the card table. |
2690 _in_cset_fast_test = _in_cset_fast_test_base - |
526 | 2691 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
2692 | |
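// A sketch of the biasing in miniature (illustrative names, not the field
// names used here): subtracting the heap's starting region index from the
// array base once lets the hot lookup be a bare shift-and-index, with no
// per-query subtraction, the same layout trick the card table uses:
//
//   bool* biased = base - ((size_t)heap_start >> LogOfHRGrainBytes);
//   bool in_cset = biased[(size_t)addr >> LogOfHRGrainBytes];
//
// so biased[i] resolves to base[i - start_index], the intended slot.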
342 | 2693 #if SCAN_ONLY_VERBOSE |
838 | 2694 _young_list->print(); |
342 | 2695 #endif // SCAN_ONLY_VERBOSE |
2696 | |
838 | 2697 if (g1_policy()->should_initiate_conc_mark()) { |
2698 concurrent_mark()->checkpointRootsInitialPre(); |
2699 } |
2700 save_marks(); |
2701 |
2702 // We must do this before any possible evacuation that should propagate |
2703 // marks. |
2704 if (mark_in_progress()) { |
2705 double start_time_sec = os::elapsedTime(); |
2706 |
2707 _cm->drainAllSATBBuffers(); |
2708 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
2709 g1_policy()->record_satb_drain_time(finish_mark_ms); |
2710 } |
2711 // Record the number of elements currently on the mark stack, so we |
2712 // only iterate over these. (Since evacuation may add to the mark |
2713 // stack, doing more exposes race conditions.) If no mark is in |
2714 // progress, this will be zero. |
2715 _cm->set_oops_do_bound(); |
2716 |
2717 assert(regions_accounted_for(), "Region leakage."); |
2718 |
2719 if (mark_in_progress()) |
2720 concurrent_mark()->newCSet(); |
2721 |
2722 // Now choose the CS. |
2723 g1_policy()->choose_collection_set(); |
2724 |
2725 // We may abandon a pause if we find no region that will fit in the MMU |
2726 // pause. |
2727 bool abandoned = (g1_policy()->collection_set() == NULL); |
2728 |
2729 // Nothing to do if we were unable to choose a collection set. |
2730 if (!abandoned) { |
342 | 2731 #if G1_REM_SET_LOGGING |
838 | 2732 gclog_or_tty->print_cr("\nAfter pause, heap:"); |
2733 print(); |
342 | 2734 #endif |
2735 | |
838 | 2736 setup_surviving_young_words(); |
2737 |
2738 // Set up the gc allocation regions. |
2739 get_gc_alloc_regions(); |
2740 |
2741 // Actually do the work... |
2742 evacuate_collection_set(); |
2743 free_collection_set(g1_policy()->collection_set()); |
2744 g1_policy()->clear_collection_set(); |
2745 |
2746 FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base); |
2747 // this is more for peace of mind; we're nulling them here and |
2748 // we're expecting them to be null at the beginning of the next GC |
2749 _in_cset_fast_test = NULL; |
2750 _in_cset_fast_test_base = NULL; |
2751 |
2752 release_gc_alloc_regions(false /* totally */); |
2753 |
2754 cleanup_surviving_young_words(); |
2755 |
2756 if (g1_policy()->in_young_gc_mode()) { |
2757 _young_list->reset_sampled_info(); |
2758 assert(check_young_list_empty(true), |
2759 "young list should be empty"); |
342 | 2760 |
2761 #if SCAN_ONLY_VERBOSE | |
838 | 2762 _young_list->print(); |
342 | 2763 #endif // SCAN_ONLY_VERBOSE |
2764 | |
838 | 2765 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
2766 _young_list->first_survivor_region(), |
2767 _young_list->last_survivor_region()); |
2768 _young_list->reset_auxilary_lists(); |
2769 } |
2770 } else { |
2771 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
342 | 2772 } |
838 | 2773 |
2774 if (evacuation_failed()) { |
2775 _summary_bytes_used = recalculate_used(); |
2776 } else { |
2777 // The "used" of the collection set has already been subtracted when |
2778 // the regions were freed. Add in the bytes evacuated. |
2779 _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
2780 } |
2781 |
2782 if (g1_policy()->in_young_gc_mode() && |
2783 g1_policy()->should_initiate_conc_mark()) { |
2784 concurrent_mark()->checkpointRootsInitialPost(); |
2785 set_marking_started(); |
845 | 2786 // CAUTION: after the doConcurrentMark() call below, |
2787 // the concurrent marking thread(s) could be running |
2788 // concurrently with us. Make sure that anything after |
2789 // this point does not assume that we are the only GC thread |
2790 // running. Note: of course, the actual marking work will |
2791 // not start until the safepoint itself is released in |
2792 // ConcurrentGCThread::safepoint_desynchronize(). |
838 | 2793 doConcurrentMark(); |
2794 } |
342 | 2795 |
2796 #if SCAN_ONLY_VERBOSE | |
838 | 2797 _young_list->print(); |
342 | 2798 #endif // SCAN_ONLY_VERBOSE |
2799 | |
838 | 2800 double end_time_sec = os::elapsedTime(); |
2801 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
2802 g1_policy()->record_pause_time_ms(pause_time_ms); |
2803 GCOverheadReporter::recordSTWEnd(end_time_sec); |
2804 g1_policy()->record_collection_pause_end(abandoned); |
2805 |
2806 assert(regions_accounted_for(), "Region leakage."); |
2807 |
2808 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
2809 HandleMark hm; // Discard invalid handles created during verification |
2810 gclog_or_tty->print(" VerifyAfterGC:"); |
2811 prepare_for_verify(); |
2812 Universe::verify(false); |
342 | 2813 } |
838 | 2814 |
2815 if (was_enabled) ref_processor()->enable_discovery(); |
2816 |
2817 { |
2818 size_t expand_bytes = g1_policy()->expansion_amount(); |
2819 if (expand_bytes > 0) { |
2820 size_t bytes_before = capacity(); |
2821 expand(expand_bytes); |
2822 } |
2823 } |
2824 |
2825 if (mark_in_progress()) { |
2826 concurrent_mark()->update_g1_committed(); |
2827 } |
546 | 2828 |
2829 #ifdef TRACESPINNING |
838 | 2830 ParallelTaskTerminator::print_termination_counts(); |
546 | 2831 #endif |
342 | 2832 |
838 | 2833 gc_epilogue(false); |
2834 } |
2835 |
2836 assert(verify_region_lists(), "Bad region lists."); |
2837 |
2838 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
2839 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
2840 print_tracing_info(); |
2841 vm_exit(-1); |
2842 } |
2843 } |
2844 |
2845 if (PrintHeapAtGC) { |
2846 Universe::print_heap_after_gc(); |
342 | 2847 } |
884 | 2848 if (G1SummarizeRSetStats && |
2849 (G1SummarizeRSetStatsPeriod > 0) && |
2850 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
2851 g1_rem_set()->print_summary_info(); |
2852 } |
342 | 2853 } |
2854 | |
2855 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { | |
2856 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
636 | 2857 // make sure we don't call set_gc_alloc_region() multiple times on |
2858 // the same region | |
2859 assert(r == NULL || !r->is_gc_alloc_region(), | |
2860 "shouldn't already be a GC alloc region"); | |
342 | 2861 HeapWord* original_top = NULL; |
2862 if (r != NULL) | |
2863 original_top = r->top(); | |
2864 | |
2865 // We will want to record the used space in r as being there before gc. | |
2866 // One we install it as a GC alloc region it's eligible for allocation. | |
2867 // So record it now and use it later. | |
2868 size_t r_used = 0; | |
2869 if (r != NULL) { | |
2870 r_used = r->used(); | |
2871 | |
2872 if (ParallelGCThreads > 0) { | |
2873 // need to take the lock to guard against two threads calling | |
2874 // get_gc_alloc_region concurrently (very unlikely but...) | |
2875 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2876 r->save_marks(); | |
2877 } | |
2878 } | |
2879 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
2880 _gc_alloc_regions[purpose] = r; | |
2881 if (old_alloc_region != NULL) { | |
2882 // Replace aliases too. | |
2883 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2884 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
2885 _gc_alloc_regions[ap] = r; | |
2886 } | |
2887 } | |
2888 } | |
2889 if (r != NULL) { | |
2890 push_gc_alloc_region(r); | |
2891 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
2892 // We are using a region as a GC alloc region after it has been used | |
2893 // as a mutator allocation region during the current marking cycle. | |
2894 // The mutator-allocated objects are currently implicitly marked, but | |
2895 // when we move hr->next_top_at_mark_start() forward at the end |
2896 // of the GC pause, they won't be. We therefore mark all objects in | |
2897 // the "gap". We do this object-by-object, since marking densely | |
2898 // does not currently work right with marking bitmap iteration. This | |
2899 // means we rely on TLAB filling at the start of pauses, and no | |
2900 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
2901 // to fix the marking bitmap iteration. | |
2902 HeapWord* curhw = r->next_top_at_mark_start(); | |
2903 HeapWord* t = original_top; | |
2904 | |
2905 while (curhw < t) { | |
2906 oop cur = (oop)curhw; | |
2907 // We'll assume parallel for generality. This is rare code. | |
2908 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
2909 curhw = curhw + cur->size(); | |
2910 } | |
2911 assert(curhw == t, "Should have parsed correctly."); | |
2912 } | |
2913 if (G1PolicyVerbose > 1) { | |
2914 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
2915 "for survivors:", r->bottom(), original_top, r->end()); | |
2916 r->print(); | |
2917 } | |
2918 g1_policy()->record_before_bytes(r_used); | |
2919 } | |
2920 } | |
2921 | |
2922 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
2923 assert(Thread::current()->is_VM_thread() || | |
2924 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); | |
2925 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), | |
2926 "Precondition."); | |
2927 hr->set_is_gc_alloc_region(true); | |
2928 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
2929 _gc_alloc_region_list = hr; | |
2930 } | |
2931 | |
2932 #ifdef G1_DEBUG | |
2933 class FindGCAllocRegion: public HeapRegionClosure { | |
2934 public: | |
2935 bool doHeapRegion(HeapRegion* r) { | |
2936 if (r->is_gc_alloc_region()) { | |
2937 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", | |
2938 r->hrs_index(), r->bottom()); | |
2939 } | |
2940 return false; | |
2941 } | |
2942 }; | |
2943 #endif // G1_DEBUG | |
2944 | |
2945 void G1CollectedHeap::forget_alloc_region_list() { | |
2946 assert(Thread::current()->is_VM_thread(), "Precondition"); | |
2947 while (_gc_alloc_region_list != NULL) { | |
2948 HeapRegion* r = _gc_alloc_region_list; | |
2949 assert(r->is_gc_alloc_region(), "Invariant."); | |
637 | 2950 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
2951 // newly allocated data in order to be able to apply deferred updates |
2952 // before the GC is done for verification purposes (i.e. to allow |
2953 // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the |
2954 // collection. |
2955 r->ContiguousSpace::set_saved_mark(); |
342 | 2956 _gc_alloc_region_list = r->next_gc_alloc_region(); |
2957 r->set_next_gc_alloc_region(NULL); | |
2958 r->set_is_gc_alloc_region(false); | |
545 | 2959 if (r->is_survivor()) { |
2960 if (r->is_empty()) { | |
2961 r->set_not_young(); | |
2962 } else { | |
2963 _young_list->add_survivor_region(r); | |
2964 } | |
2965 } | |
342 | 2966 if (r->is_empty()) { |
2967 ++_free_regions; | |
2968 } | |
2969 } | |
2970 #ifdef G1_DEBUG | |
2971 FindGCAllocRegion fa; | |
2972 heap_region_iterate(&fa); | |
2973 #endif // G1_DEBUG | |
2974 } | |
2975 | |
2976 | |
2977 bool G1CollectedHeap::check_gc_alloc_regions() { | |
2978 // TODO: allocation regions check | |
2979 return true; | |
2980 } | |
2981 | |
2982 void G1CollectedHeap::get_gc_alloc_regions() { | |
636 | 2983 // First, let's check that the GC alloc region list is empty (it should be) |
2984 assert(_gc_alloc_region_list == NULL, "invariant"); | |
2985 | |
342 | 2986 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
636 | 2987 assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
861 | 2988 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
636 | 2989 |
342 | 2990 // Create new GC alloc regions. |
636 | 2991 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
2992 _retained_gc_alloc_regions[ap] = NULL; | |
2993 | |
2994 if (alloc_region != NULL) { | |
2995 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); | |
2996 | |
2997 // let's make sure that the GC alloc region is not tagged as such | |
2998 // outside a GC operation | |
2999 assert(!alloc_region->is_gc_alloc_region(), "sanity"); | |
3000 | |
3001 if (alloc_region->in_collection_set() || | |
3002 alloc_region->top() == alloc_region->end() || | |
3003 alloc_region->top() == alloc_region->bottom()) { | |
3004 // we will discard the current GC alloc region if it's in the | |
3005 // collection set (it can happen!), if it's already full (no | |
3006 // point in using it), or if it's empty (this means that it | |
3007 // was emptied during a cleanup and it should be on the free | |
3008 // list now). | |
3009 | |
3010 alloc_region = NULL; | |
3011 } | |
3012 } | |
3013 | |
3014 if (alloc_region == NULL) { | |
3015 // we will get a new GC alloc region | |
342 | 3016 alloc_region = newAllocRegionWithExpansion(ap, 0); |
861 | 3017 } else {
3018 // the region was retained from the last collection
3019 ++_gc_alloc_region_counts[ap];
342 | 3020 } |
636 | 3021 |
342 | 3022 if (alloc_region != NULL) { |
636 | 3023 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
342 | 3024 set_gc_alloc_region(ap, alloc_region); |
3025 } | |
636 | 3026 |
3027 assert(_gc_alloc_regions[ap] == NULL || | |
3028 _gc_alloc_regions[ap]->is_gc_alloc_region(), | |
3029 "the GC alloc region should be tagged as such"); | |
3030 assert(_gc_alloc_regions[ap] == NULL || | |
3031 _gc_alloc_regions[ap] == _gc_alloc_region_list, | |
3032 "the GC alloc region should be the same as the GC alloc list head"); | |
342 | 3033 } |
3034 // Set alternative regions for allocation purposes that have reached | |
636 | 3035 // their limit. |
342 | 3036 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
3037 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
3038 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
3039 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
3040 } | |
3041 } | |
3042 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3043 } | |
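// In short, the retained-region decision above reduces to the following
// sketch ("reuse" is illustrative shorthand, not a name used in this file):
//
//   bool reuse = retained != NULL &&
//                !retained->in_collection_set() &&      // not about to be collected
//                retained->top() != retained->end() &&  // not completely full
//                retained->top() != retained->bottom(); // not emptied by cleanup
//   HeapRegion* region = reuse ? retained
//                              : newAllocRegionWithExpansion(ap, 0);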
3044 | |
636 | 3045 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
342 | 3046 // We keep a separate list of all regions that have been alloc regions in |
636 | 3047 // the current collection pause. Forget that now. This method will |
3048 // untag the GC alloc regions and tear down the GC alloc region | |
3049 // list. It's desirable that no regions are tagged as GC alloc | |
3050 // outside GCs. | |
342 | 3051 forget_alloc_region_list(); |
3052 | |
3053 // The current alloc regions contain objs that have survived | |
3054 // collection. Make them no longer GC alloc regions. | |
3055 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3056 HeapRegion* r = _gc_alloc_regions[ap]; | |
636 | 3057 _retained_gc_alloc_regions[ap] = NULL; |
861 | 3058 _gc_alloc_region_counts[ap] = 0;
636 | 3059 |
3060 if (r != NULL) { | |
3061 // we retain nothing on _gc_alloc_regions between GCs | |
3062 set_gc_alloc_region(ap, NULL); | |
3063 | |
3064 if (r->is_empty()) { | |
3065 // we didn't actually allocate anything in it; let's just put | |
3066 // it on the free list | |
342 | 3067 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
3068 r->set_zero_fill_complete(); | |
3069 put_free_region_on_list_locked(r); | |
636 | 3070 } else if (_retain_gc_alloc_region[ap] && !totally) { |
3071 // retain it so that we can use it at the beginning of the next GC | |
3072 _retained_gc_alloc_regions[ap] = r; | |
342 | 3073 } |
3074 } | |
636 | 3075 } |
3076 } | |
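// To summarize the dispositions above: an empty region goes straight back on
// the free list, a non-empty region is kept in _retained_gc_alloc_regions
// only if its purpose allows retention and this is not a "totally" release,
// and anything else is merely untagged and left in place.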
3077 | |
3078 #ifndef PRODUCT | |
3079 // Useful for debugging | |
3080 | |
3081 void G1CollectedHeap::print_gc_alloc_regions() { | |
3082 gclog_or_tty->print_cr("GC alloc regions"); | |
3083 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3084 HeapRegion* r = _gc_alloc_regions[ap]; | |
3085 if (r == NULL) { | |
3086 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); | |
3087 } else { | |
3088 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, | |
3089 ap, r->bottom(), r->used()); | |
3090 } | |
3091 } | |
3092 } | |
3093 #endif // PRODUCT | |
342 | 3094 |
3095 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
3096 _drain_in_progress = false; | |
3097 set_evac_failure_closure(cl); | |
3098 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3099 } | |
3100 | |
3101 void G1CollectedHeap::finalize_for_evac_failure() { | |
3102 assert(_evac_failure_scan_stack != NULL && | |
3103 _evac_failure_scan_stack->length() == 0, | |
3104 "Postcondition"); | |
3105 assert(!_drain_in_progress, "Postcondition"); | |
3106 // Don't have to delete, since the scan stack is a resource object. | |
3107 _evac_failure_scan_stack = NULL; | |
3108 } | |
3109 | |
3110 | |
3111 | |
3112 // *** Sequential G1 Evacuation | |
3113 | |
3114 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { | |
3115 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3116 // let the caller handle alloc failure | |
3117 if (alloc_region == NULL) return NULL; | |
3118 assert(isHumongous(word_size) || !alloc_region->isHumongous(), | |
3119 "Either the object is humongous or the region isn't"); | |
3120 HeapWord* block = alloc_region->allocate(word_size); | |
3121 if (block == NULL) { | |
3122 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); | |
3123 } | |
3124 return block; | |
3125 } | |
3126 | |
3127 class G1IsAliveClosure: public BoolObjectClosure { | |
3128 G1CollectedHeap* _g1; | |
3129 public: | |
3130 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
3131 void do_object(oop p) { assert(false, "Do not call."); } | |
3132 bool do_object_b(oop p) { | |
3133 // It is reachable if it is outside the collection set, or is inside | |
3134 // and forwarded. | |
3135 | |
3136 #ifdef G1_DEBUG | |
3137 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
3138 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
3139 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
3140 #endif // G1_DEBUG | |
3141 | |
3142 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
3143 } | |
3144 }; | |
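// The predicate above amounts to
//   alive(p) == !obj_in_cs(p) || p->is_forwarded()
// i.e. during reference processing only collection-set objects that were
// never forwarded (neither copied nor self-forwarded) are considered dead.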
3145 | |
3146 class G1KeepAliveClosure: public OopClosure { | |
3147 G1CollectedHeap* _g1; | |
3148 public: | |
3149 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
845 | 3150 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3151 void do_oop( oop* p) {
342 | 3152 oop obj = *p; |
3153 #ifdef G1_DEBUG | |
3154 if (PrintGC && Verbose) { | |
3155 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
3156 p, (void*) obj, (void*) *p); | |
3157 } | |
3158 #endif // G1_DEBUG | |
3159 | |
3160 if (_g1->obj_in_cs(obj)) { | |
3161 assert( obj->is_forwarded(), "invariant" ); | |
3162 *p = obj->forwardee(); | |
3163 #ifdef G1_DEBUG | |
3164 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
3165 (void*) obj, (void*) *p); | |
3166 #endif // G1_DEBUG | |
3167 } | |
3168 } | |
3169 }; | |
3170 | |
616 | 3171 class UpdateRSetImmediate : public OopsInHeapRegionClosure {
342 | 3172 private: |
3173 G1CollectedHeap* _g1; | |
3174 G1RemSet* _g1_rem_set; | |
3175 public: | |
616 | 3176 UpdateRSetImmediate(G1CollectedHeap* g1) :
3177 _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
342 | 3178 |
845 | 3179 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3180 virtual void do_oop( oop* p) { do_oop_work(p); }
3181 template <class T> void do_oop_work(T* p) {
342 | 3182 assert(_from->is_in_reserved(p), "paranoia"); |
845 | 3183 T heap_oop = oopDesc::load_heap_oop(p);
3184 if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
616 | 3185 _g1_rem_set->par_write_ref(_from, p, 0);
342 | 3186 } |
3187 } | |
3188 }; | |
3189 | |
616 | 3190 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
3191 private:
3192 G1CollectedHeap* _g1;
3193 DirtyCardQueue *_dcq;
3194 CardTableModRefBS* _ct_bs;
3195
3196 public:
3197 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
3198 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
3199
845 | 3200 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3201 virtual void do_oop( oop* p) { do_oop_work(p); }
3202 template <class T> void do_oop_work(T* p) {
616 | 3203 assert(_from->is_in_reserved(p), "paranoia");
845 | 3204 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
3205 !_from->is_survivor()) {
616 | 3206 size_t card_index = _ct_bs->index_for(p);
3207 if (_ct_bs->mark_card_deferred(card_index)) {
3208 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
3209 }
3210 }
3211 }
3212 };
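// mark_card_deferred()/byte_for_index() above form a claim-then-enqueue
// pair: only the first thread to set a card's deferred bit enqueues it, so
// each card is refined at most once after the pause. As a rough sketch
// (assuming the usual 512-byte cards; the constant is not taken from this
// file):
//   size_t card_index = (uintptr_t(p) - base) >> 9;  // 2^9 = 512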
3213
3214
3215
342 | 3216 class RemoveSelfPointerClosure: public ObjectClosure { |
3217 private: | |
3218 G1CollectedHeap* _g1; | |
3219 ConcurrentMark* _cm; | |
3220 HeapRegion* _hr; | |
3221 size_t _prev_marked_bytes; | |
3222 size_t _next_marked_bytes; | |
616 | 3223 OopsInHeapRegionClosure *_cl;
342 | 3224 public: |
616 | 3225 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
3226 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
3227 _next_marked_bytes(0), _cl(cl) {}
342 | 3228 |
3229 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
3230 size_t next_marked_bytes() { return _next_marked_bytes; } | |
3231 | |
352 | 3232 // The original idea here was to coalesce evacuated and dead objects.
3233 // However that caused complications with the block offset table (BOT).
3234 // In particular if there were two TLABs, one of them partially refined.
3235 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
3236 // The BOT entries of the unrefined part of TLAB_2 point to the start
3237 // of TLAB_2. If the last object of TLAB_1 and the first object
3238 // of TLAB_2 are coalesced, then the cards of the unrefined part
3239 // would point into the middle of the filler object.
3240 //
3241 // The current approach is to not coalesce and leave the BOT contents intact.
3242 void do_object(oop obj) {
3243 if (obj->is_forwarded() && obj->forwardee() == obj) {
3244 // The object failed to move.
3245 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
3246 _cm->markPrev(obj);
3247 assert(_cm->isPrevMarked(obj), "Should be marked!");
3248 _prev_marked_bytes += (obj->size() * HeapWordSize);
3249 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
3250 _cm->markAndGrayObjectIfNecessary(obj);
3251 }
3252 obj->set_mark(markOopDesc::prototype());
3253 // While we were processing RSet buffers during the
3254 // collection, we actually didn't scan any cards on the
3255 // collection set, since we didn't want to update remembered
3256 // sets with entries that point into the collection set, given
3257 // that live objects from the collection set are about to move
3258 // and such entries will be stale very soon. This change also
3259 // dealt with a reliability issue which involved scanning a
3260 // card in the collection set and coming across an array that
3261 // was being chunked and looking malformed. The problem is
3262 // that, if evacuation fails, we might have remembered set
3263 // entries missing given that we skipped cards on the
3264 // collection set. So, we'll recreate such entries now.
616 | 3265 obj->oop_iterate(_cl);
352 | 3266 assert(_cm->isPrevMarked(obj), "Should be marked!");
3267 } else {
3268 // The object has been either evacuated or is dead. Fill it with a
3269 // dummy object.
3270 MemRegion mr((HeapWord*)obj, obj->size());
481 | 3271 CollectedHeap::fill_with_object(mr);
342 | 3272 _cm->clearRangeBothMaps(mr); |
3273 } | |
3274 } | |
3275 }; | |
3276 | |
3277 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
616 | 3278 UpdateRSetImmediate immediate_update(_g1h);
3279 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
3280 UpdateRSetDeferred deferred_update(_g1h, &dcq);
3281 OopsInHeapRegionClosure *cl;
3282 if (G1DeferredRSUpdate) {
3283 cl = &deferred_update;
3284 } else {
3285 cl = &immediate_update;
3286 }
342 | 3287 HeapRegion* cur = g1_policy()->collection_set(); |
3288 while (cur != NULL) { | |
3289 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3290 | |
616 | 3291 RemoveSelfPointerClosure rspc(_g1h, cl);
342 | 3292 if (cur->evacuation_failed()) { |
3293 assert(cur->in_collection_set(), "bad CS"); | |
616 | 3294 cl->set_region(cur);
342 | 3295 cur->object_iterate(&rspc); |
3296 | |
3297 // A number of manipulations to make the TAMS be the current top, | |
3298 // and the marked bytes be the ones observed in the iteration. | |
3299 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
3300 // The comments below are the postconditions achieved by the | |
3301 // calls. Note especially the last such condition, which says that | |
3302 // the count of marked bytes has been properly restored. | |
3303 cur->note_start_of_marking(false); | |
3304 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3305 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
3306 // _next_marked_bytes == prev_marked_bytes. | |
3307 cur->note_end_of_marking(); | |
3308 // _prev_top_at_mark_start == top(), | |
3309 // _prev_marked_bytes == prev_marked_bytes | |
3310 } | |
3311 // If there is no mark in progress, we modified the _next variables | |
3312 // above needlessly, but harmlessly. | |
3313 if (_g1h->mark_in_progress()) { | |
3314 cur->note_start_of_marking(false); | |
3315 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
3316 // _next_marked_bytes == next_marked_bytes. | |
3317 } | |
3318 | |
3319 // Now make sure the region has the right index in the sorted array. | |
3320 g1_policy()->note_change_in_marked_bytes(cur); | |
3321 } | |
3322 cur = cur->next_in_collection_set(); | |
3323 } | |
3324 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3325 | |
3326 // Now restore saved marks, if any. | |
3327 if (_objs_with_preserved_marks != NULL) { | |
3328 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3329 assert(_objs_with_preserved_marks->length() == | |
3330 _preserved_marks_of_objs->length(), "Both or none."); | |
3331 guarantee(_objs_with_preserved_marks->length() == | |
3332 _preserved_marks_of_objs->length(), "Both or none."); | |
3333 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3334 oop obj = _objs_with_preserved_marks->at(i); | |
3335 markOop m = _preserved_marks_of_objs->at(i); | |
3336 obj->set_mark(m); | |
3337 } | |
3338 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3339 delete _objs_with_preserved_marks; | |
3340 delete _preserved_marks_of_objs; | |
3341 _objs_with_preserved_marks = NULL; | |
3342 _preserved_marks_of_objs = NULL; | |
3343 } | |
3344 } | |
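// The note_start_of_marking(false) / add_to_marked_bytes() /
// note_end_of_marking() sequence above is used purely for its
// postconditions: it rebuilds the region's prev marking data (TAMS == top,
// marked bytes == bytes observed by RemoveSelfPointerClosure) so that
// liveness queries after the failed evacuation see what actually survived.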
3345 | |
3346 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
3347 _evac_failure_scan_stack->push(obj); | |
3348 } | |
3349 | |
3350 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3351 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3352 | |
3353 while (_evac_failure_scan_stack->length() > 0) { | |
3354 oop obj = _evac_failure_scan_stack->pop(); | |
3355 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3356 obj->oop_iterate_backwards(_evac_failure_closure); | |
3357 } | |
3358 } | |
3359 | |
3360 void G1CollectedHeap::handle_evacuation_failure(oop old) { | |
3361 markOop m = old->mark(); | |
3362 // forward to self | |
3363 assert(!old->is_forwarded(), "precondition"); | |
3364 | |
3365 old->forward_to(old); | |
3366 handle_evacuation_failure_common(old, m); | |
3367 } | |
3368 | |
3369 oop | |
3370 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
3371 oop old) { | |
3372 markOop m = old->mark(); | |
3373 oop forward_ptr = old->forward_to_atomic(old); | |
3374 if (forward_ptr == NULL) { | |
3375 // Forward-to-self succeeded. | |
3376 if (_evac_failure_closure != cl) { | |
3377 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
3378 assert(!_drain_in_progress, | |
3379 "Should only be true while someone holds the lock."); | |
3380 // Set the global evac-failure closure to the current thread's. | |
3381 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
3382 set_evac_failure_closure(cl); | |
3383 // Now do the common part. | |
3384 handle_evacuation_failure_common(old, m); | |
3385 // Reset to NULL. | |
3386 set_evac_failure_closure(NULL); | |
3387 } else { | |
3388 // The lock is already held, and this is recursive. | |
3389 assert(_drain_in_progress, "This should only be the recursive case."); | |
3390 handle_evacuation_failure_common(old, m); | |
3391 } | |
3392 return old; | |
3393 } else { | |
3394 // Someone else had a place to copy it. | |
3395 return forward_ptr; | |
3396 } | |
3397 } | |
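// forward_to_atomic(old) is a CAS on the mark word, so exactly one thread
// wins the race to self-forward a given object. A sketch of the resulting
// contract (mirroring the code above, not adding to it):
//   oop fwd = old->forward_to_atomic(old);
//   if (fwd == NULL) { /* we won: preserve the mark, rescan the fields */ }
//   else             { /* another thread copied or self-forwarded it */ }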
3398 | |
3399 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
3400 set_evacuation_failed(true); | |
3401 | |
3402 preserve_mark_if_necessary(old, m); | |
3403 | |
3404 HeapRegion* r = heap_region_containing(old); | |
3405 if (!r->evacuation_failed()) { | |
3406 r->set_evacuation_failed(true); | |
751 | 3407 if (G1PrintRegions) { |
342 | 3408 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " |
3409 "["PTR_FORMAT","PTR_FORMAT")\n", | |
3410 r, r->bottom(), r->end()); | |
3411 } | |
3412 } | |
3413 | |
3414 push_on_evac_failure_scan_stack(old); | |
3415 | |
3416 if (!_drain_in_progress) { | |
3417 // prevent recursion in copy_to_survivor_space() | |
3418 _drain_in_progress = true; | |
3419 drain_evac_failure_scan_stack(); | |
3420 _drain_in_progress = false; | |
3421 } | |
3422 } | |
3423 | |
3424 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
3425 if (m != markOopDesc::prototype()) { | |
3426 if (_objs_with_preserved_marks == NULL) { | |
3427 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
3428 _objs_with_preserved_marks = | |
3429 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3430 _preserved_marks_of_objs = | |
3431 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
3432 } | |
3433 _objs_with_preserved_marks->push(obj); | |
3434 _preserved_marks_of_objs->push(m); | |
3435 } | |
3436 } | |
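// Only marks that differ from markOopDesc::prototype() are preserved: a
// prototype mark word carries no information (no hash, no lock, no age) and
// can be reinstalled for free, which is what RemoveSelfPointerClosure does
// for every failed object before the preserved marks are put back.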
3437 | |
3438 // *** Parallel G1 Evacuation | |
3439 | |
3440 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
3441 size_t word_size) { | |
3442 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3443 // let the caller handle alloc failure | |
3444 if (alloc_region == NULL) return NULL; | |
3445 | |
3446 HeapWord* block = alloc_region->par_allocate(word_size); | |
3447 if (block == NULL) { | |
3448 MutexLockerEx x(par_alloc_during_gc_lock(), | |
3449 Mutex::_no_safepoint_check_flag); | |
3450 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
3451 } | |
3452 return block; | |
3453 } | |
3454 | |
545 | 3455 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
3456 bool par) { | |
3457 // Another thread might have obtained alloc_region for the given | |
3458 // purpose, and might be attempting to allocate in it, and might | |
3459 // succeed. Therefore, we can't do the "finalization" stuff on the | |
3460 // region below until we're sure the last allocation has happened. | |
3461 // We ensure this by allocating the remaining space with a garbage | |
3462 // object. | |
3463 if (par) par_allocate_remaining_space(alloc_region); | |
3464 // Now we can do the post-GC stuff on the region. | |
3465 alloc_region->note_end_of_copying(); | |
3466 g1_policy()->record_after_bytes(alloc_region->used()); | |
3467 } | |
3468 | |
342 | 3469 HeapWord* |
3470 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
3471 HeapRegion* alloc_region, | |
3472 bool par, | |
3473 size_t word_size) { | |
3474 HeapWord* block = NULL; | |
3475 // In the parallel case, a previous thread to obtain the lock may have | |
3476 // already assigned a new gc_alloc_region. | |
3477 if (alloc_region != _gc_alloc_regions[purpose]) { | |
3478 assert(par, "But should only happen in parallel case."); | |
3479 alloc_region = _gc_alloc_regions[purpose]; | |
3480 if (alloc_region == NULL) return NULL; | |
3481 block = alloc_region->par_allocate(word_size); | |
3482 if (block != NULL) return block; | |
3483 // Otherwise, continue; this new region is also full.
3484 } | |
3485 assert(alloc_region != NULL, "We better have an allocation region"); | |
545 | 3486 retire_alloc_region(alloc_region, par); |
342 | 3487 |
3488 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
3489 // Cannot allocate more regions for the given purpose. | |
3490 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
3491 // Is there an alternative? | |
3492 if (purpose != alt_purpose) { | |
3493 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
3494 // Has the alternative region not been aliased to this one?
545 | 3495 if (alloc_region != alt_region && alt_region != NULL) { |
342 | 3496 // Try to allocate in the alternative region. |
3497 if (par) { | |
3498 block = alt_region->par_allocate(word_size); | |
3499 } else { | |
3500 block = alt_region->allocate(word_size); | |
3501 } | |
3502 // Make an alias. | |
3503 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
545 | 3504 if (block != NULL) { |
3505 return block; | |
3506 } | |
3507 retire_alloc_region(alt_region, par); | |
342 | 3508 } |
3509 // Both the allocation region and the alternative one are full
3510 // and aliased; replace them with a new allocation region.
3511 purpose = alt_purpose; | |
3512 } else { | |
3513 set_gc_alloc_region(purpose, NULL); | |
3514 return NULL; | |
3515 } | |
3516 } | |
3517 | |
3518 // Now allocate a new region for allocation. | |
3519 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); | |
3520 | |
3521 // let the caller handle alloc failure | |
3522 if (alloc_region != NULL) { | |
3523 | |
3524 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3525 assert(alloc_region->saved_mark_at_top(), | |
3526 "Mark should have been saved already."); | |
3527 // We used to assert that the region was zero-filled here, but no | |
3528 // longer. | |
3529 | |
3530 // This must be done last: once it's installed, other regions may | |
3531 // allocate in it (without holding the lock.) | |
3532 set_gc_alloc_region(purpose, alloc_region); | |
3533 | |
3534 if (par) { | |
3535 block = alloc_region->par_allocate(word_size); | |
3536 } else { | |
3537 block = alloc_region->allocate(word_size); | |
3538 } | |
3539 // Caller handles alloc failure. | |
3540 } else { | |
3541 // This also sets to NULL other aps (alloc purposes) aliased to the same old alloc region.
3542 set_gc_alloc_region(purpose, NULL); | |
3543 } | |
3544 return block; // May be NULL. | |
3545 } | |
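// The fallback chain above: retire the full region, then (if the purpose is
// at its region limit) alias this purpose to its alternative's region, and
// only then ask for a brand-new region. Aliasing means two entries of
// _gc_alloc_regions point at the same HeapRegion, which is why
// set_gc_alloc_region() also clears the other aliases when one is replaced.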
3546 | |
3547 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
3548 HeapWord* block = NULL; | |
3549 size_t free_words; | |
3550 do { | |
3551 free_words = r->free()/HeapWordSize; | |
3552 // If there's too little space, no one can allocate, so we're done. | |
3553 if (free_words < (size_t)oopDesc::header_size()) return; | |
3554 // Otherwise, try to claim it. | |
3555 block = r->par_allocate(free_words); | |
3556 } while (block == NULL); | |
481 | 3557 fill_with_object(block, free_words);
342 | 3558 } |
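// The loop above is a claim-by-CAS idiom: read the free space, try to
// par_allocate() exactly that many words, and retry if a racing allocator
// shrank the region in between. Whoever succeeds owns all the remaining
// space, and filling it with a dummy object keeps the region parsable;
// afterwards no thread can allocate in r at all.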
3559 | |
3560 #ifndef PRODUCT | |
3561 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
3562 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
3563 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
3564 return true; | |
3565 } | |
3566 #endif // PRODUCT | |
3567 | |
845 | 3568 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
3569 : _g1h(g1h),
3570 _refs(g1h->task_queue(queue_num)),
3571 _dcq(&g1h->dirty_card_queue_set()),
3572 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
3573 _g1_rem(g1h->g1_rem_set()),
3574 _hash_seed(17), _queue_num(queue_num),
3575 _term_attempts(0),
3576 _age_table(false),
342 | 3577 #if G1_DETAILED_STATS
845 | 3578 _pushes(0), _pops(0), _steals(0),
3579 _steal_attempts(0), _overflow_pushes(0),
342 | 3580 #endif
845 | 3581 _strong_roots_time(0), _term_time(0),
3582 _alloc_buffer_waste(0), _undo_waste(0)
3583 {
3584 // we allocate G1YoungSurvRateNumRegions+1 entries, since
3585 // we "sacrifice" entry 0 to keep track of surviving bytes for
3586 // non-young regions (where the age is -1)
3587 // We also add a few elements at the beginning and at the end in
3588 // an attempt to eliminate cache contention
3589 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
3590 size_t array_length = PADDING_ELEM_NUM +
3591 real_length +
3592 PADDING_ELEM_NUM;
3593 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
3594 if (_surviving_young_words_base == NULL)
3595 vm_exit_out_of_memory(array_length * sizeof(size_t),
3596 "Not enough space for young surv histo.");
3597 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
3598 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
3599
3600 _overflowed_refs = new OverflowQueue(10);
3601
3602 _start = os::elapsedTime();
3603 }
342 | 3604 |
3605 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : | |
3606 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
3607 _par_scan_state(par_scan_state) { } | |
3608 | |
845 | 3609 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
342 | 3610 // This is called _after_ do_oop_work has been called, hence after |
3611 // the object has been relocated to its new location and *p points | |
3612 // to its new location. | |
3613 | |
845 | 3614 T heap_oop = oopDesc::load_heap_oop(p);
3615 if (!oopDesc::is_null(heap_oop)) {
3616 oop obj = oopDesc::decode_heap_oop(heap_oop);
3617 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
342 | 3618 "shouldn't still be in the CSet if evacuation didn't fail."); |
845 | 3619 HeapWord* addr = (HeapWord*)obj;
342 | 3620 if (_g1->is_in_g1_reserved(addr)) |
3621 _cm->grayRoot(oop(addr)); | |
3622 } | |
3623 } | |
3624 | |
3625 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
3626 size_t word_sz = old->size(); | |
3627 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
3628 // +1 to make the -1 indexes valid... | |
3629 int young_index = from_region->young_index_in_cset()+1; | |
3630 assert( (from_region->is_young() && young_index > 0) || | |
3631 (!from_region->is_young() && young_index == 0), "invariant" ); | |
3632 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
3633 markOop m = old->mark(); | |
545 | 3634 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
3635 : m->age(); | |
3636 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, | |
342 | 3637 word_sz); |
3638 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
3639 oop obj = oop(obj_ptr); | |
3640 | |
3641 if (obj_ptr == NULL) { | |
3642 // This will either forward-to-self, or detect that someone else has | |
3643 // installed a forwarding pointer. | |
3644 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
3645 return _g1->handle_evacuation_failure_par(cl, old); | |
3646 } | |
3647 | |
526 | 3648 // We're going to allocate linearly, so might as well prefetch ahead. |
3649 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); | |
3650 | |
342 | 3651 oop forward_ptr = old->forward_to_atomic(obj); |
3652 if (forward_ptr == NULL) { | |
3653 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
526 | 3654 if (g1p->track_object_age(alloc_purpose)) { |
3655 // We could simply do obj->incr_age(). However, this causes a | |
3656 // performance issue. obj->incr_age() will first check whether | |
3657 // the object has a displaced mark by checking its mark word; | |
3658 // getting the mark word from the new location of the object | |
3659 // stalls. So, given that we already have the mark word and we | |
3660 // are about to install it anyway, it's better to increase the | |
3661 // age on the mark word, when the object does not have a | |
3662 // displaced mark word. We're not expecting many objects to have | |
3663 // a displaced mark word, so that case is not optimized
3664 // further (it could be...) and we simply call obj->incr_age(). | |
3665 | |
3666 if (m->has_displaced_mark_helper()) { | |
3667 // in this case, we have to install the mark word first, | |
3668 // otherwise obj looks to be forwarded (the old mark word, | |
3669 // which contains the forward pointer, was copied) | |
3670 obj->set_mark(m); | |
3671 obj->incr_age(); | |
3672 } else { | |
3673 m = m->incr_age(); | |
545 | 3674 obj->set_mark(m); |
526 | 3675 } |
545 | 3676 _par_scan_state->age_table()->add(obj, word_sz); |
3677 } else { | |
3678 obj->set_mark(m); | |
526 | 3679 } |
3680 | |
342 | 3681 // preserve "next" mark bit |
3682 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
3683 if (!use_local_bitmaps || | |
3684 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
3685 // if we couldn't mark it on the local bitmap (this happens when | |
3686 // the object was not allocated in the GCLab), we have to bite | |
3687 // the bullet and do the standard parallel mark | |
3688 _cm->markAndGrayObjectIfNecessary(obj); | |
3689 } | |
3690 #if 1 | |
3691 if (_g1->isMarkedNext(old)) { | |
3692 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
3693 } | |
3694 #endif | |
3695 } | |
3696 | |
3697 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
3698 surv_young_words[young_index] += word_sz; | |
3699 | |
3700 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
3701 arrayOop(old)->set_length(0); | |
845 | 3702 oop* old_p = set_partial_array_mask(old);
3703 _par_scan_state->push_on_queue(old_p);
342 | 3704 } else { |
526 | 3705 // No point in using the slower heap_region_containing() method, |
3706 // given that we know obj is in the heap. | |
3707 _scanner->set_region(_g1->heap_region_containing_raw(obj)); | |
342 | 3708 obj->oop_iterate_backwards(_scanner); |
3709 } | |
3710 } else { | |
3711 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
3712 obj = forward_ptr; | |
3713 } | |
3714 return obj; | |
3715 } | |
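// A sketch of the age-update fast path above: since we already hold the
// pre-copy mark word m, bumping the age there and installing it avoids
// re-reading the just-copied, likely cache-cold mark word from obj:
//   if (m->has_displaced_mark_helper()) { obj->set_mark(m); obj->incr_age(); }
//   else                                { obj->set_mark(m->incr_age());     }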
3716 | |
845 | 3717 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
3718 template <class T>
3719 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>
3720 ::do_oop_work(T* p) {
3721 oop obj = oopDesc::load_decode_heap_oop(p);
342 | 3722 assert(barrier != G1BarrierRS || obj != NULL, |
3723 "Precondition: G1BarrierRS implies obj is nonNull"); | |
3724 | |
526 | 3725 // The only time we skip the cset test is when we're scanning |
3726 // references popped from the queue. And we only push on the queue | |
3727 // references that we know point into the cset, so no point in | |
3728 // checking again. But we'll leave an assert here for peace of mind. | |
3729 assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant"); | |
3730 | |
3731 // here the null check is implicit in the cset_fast_test() test | |
3732 if (skip_cset_test || _g1->in_cset_fast_test(obj)) { | |
342 | 3733 #if G1_REM_SET_LOGGING |
526 | 3734 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
3735 "into CS.", p, (void*) obj); | |
342 | 3736 #endif |
526 | 3737 if (obj->is_forwarded()) { |
845 | 3738 oopDesc::encode_store_heap_oop(p, obj->forwardee());
526 | 3739 } else { |
845 | 3740 oop copy_oop = copy_to_survivor_space(obj);
3741 oopDesc::encode_store_heap_oop(p, copy_oop);
342 | 3742 } |
526 | 3743 // When scanning the RS, we only care about objs in CS. |
3744 if (barrier == G1BarrierRS) { | |
616 | 3745 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
342 | 3746 } |
526 | 3747 } |
3748 | |
3749 // When scanning moved objs, must look at all oops. | |
3750 if (barrier == G1BarrierEvac && obj != NULL) { | |
616 | 3751 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
526 | 3752 } |
3753 | |
3754 if (do_gen_barrier && obj != NULL) { | |
3755 par_do_barrier(p); | |
3756 } | |
3757 } | |
3758 | |
3759 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p); | |
845 | 3760 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p);
3761
3762 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
526 | 3763 assert(has_partial_array_mask(p), "invariant"); |
3764 oop old = clear_partial_array_mask(p); | |
342 | 3765 assert(old->is_objArray(), "must be obj array"); |
3766 assert(old->is_forwarded(), "must be forwarded"); | |
3767 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
3768 | |
3769 objArrayOop obj = objArrayOop(old->forwardee()); | |
3770 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
3771 // Process ParGCArrayScanChunk elements now | |
3772 // and push the remainder back onto queue | |
3773 int start = arrayOop(old)->length(); | |
3774 int end = obj->length(); | |
3775 int remainder = end - start; | |
3776 assert(start <= end, "just checking"); | |
3777 if (remainder > 2 * ParGCArrayScanChunk) { | |
3778 // Test above combines last partial chunk with a full chunk | |
3779 end = start + ParGCArrayScanChunk; | |
3780 arrayOop(old)->set_length(end); | |
3781 // Push remainder. | |
845 | 3782 oop* old_p = set_partial_array_mask(old);
3783 assert(arrayOop(old)->length() < obj->length(), "Empty push?");
3784 _par_scan_state->push_on_queue(old_p);
342 | 3785 } else { |
3786 // Restore length so that the heap remains parsable in | |
3787 // case of evacuation failure. | |
3788 arrayOop(old)->set_length(end); | |
3789 } | |
845 | 3790 _scanner.set_region(_g1->heap_region_containing_raw(obj));
342 | 3791 // process our set of indices (include header in first chunk) |
845 | 3792 obj->oop_iterate_range(&_scanner, start, end);
342 | 3793 } |
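// The chunking protocol above keeps its progress in the forwarded-from
// array's length field: 'start' is how many elements have been claimed so
// far, 'end' is where this pass stops. As a worked example (assuming
// ParGCArrayScanChunk == 50, an assumed value rather than one taken from
// this file): a 180-element array is processed as [0,50), [50,100) and
// then, because 180 - 100 <= 2*50, one final [100,180) pass, so the last
// partial chunk always rides along with a full one.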
3794 | |
3795 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
3796 protected: | |
3797 G1CollectedHeap* _g1h; | |
3798 G1ParScanThreadState* _par_scan_state; | |
3799 RefToScanQueueSet* _queues; | |
3800 ParallelTaskTerminator* _terminator; | |
3801 | |
3802 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
3803 RefToScanQueueSet* queues() { return _queues; } | |
3804 ParallelTaskTerminator* terminator() { return _terminator; } | |
3805 | |
3806 public: | |
3807 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
3808 G1ParScanThreadState* par_scan_state, | |
3809 RefToScanQueueSet* queues, | |
3810 ParallelTaskTerminator* terminator) | |
3811 : _g1h(g1h), _par_scan_state(par_scan_state), | |
3812 _queues(queues), _terminator(terminator) {} | |
3813 | |
3814 void do_void() { | |
3815 G1ParScanThreadState* pss = par_scan_state(); | |
3816 while (true) { | |
3817 pss->trim_queue(); | |
3818 IF_G1_DETAILED_STATS(pss->note_steal_attempt()); | |
845 | 3819
3820 StarTask stolen_task;
3821 if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
342 | 3822 IF_G1_DETAILED_STATS(pss->note_steal()); |
526 | 3823 |
3824 // slightly paranoid tests; I'm trying to catch potential | |
3825 // problems before we go into push_on_queue to know where the | |
3826 // problem is coming from | |
845 | 3827 assert((oop*)stolen_task != NULL, "Error");
3828 if (stolen_task.is_narrow()) {
3829 assert(UseCompressedOops, "Error");
3830 narrowOop* p = (narrowOop*) stolen_task;
3831 assert(has_partial_array_mask(p) ||
3832 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error");
3833 pss->push_on_queue(p);
3834 } else {
3835 oop* p = (oop*) stolen_task;
3836 assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error");
3837 pss->push_on_queue(p);
3838 }
342 | 3839 continue; |
3840 } | |
3841 pss->start_term_time(); | |
3842 if (terminator()->offer_termination()) break; | |
3843 pss->end_term_time(); | |
3844 } | |
3845 pss->end_term_time(); | |
3846 pss->retire_alloc_buffers(); | |
3847 } | |
3848 }; | |
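// do_void() above is the standard drain/steal/terminate pattern: exhaust
// the local queue, try to steal a StarTask from a random victim, and only
// offer termination when both fail; offer_termination() returns false when
// more work appears, sending the worker back to stealing.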
3849 | |
3850 class G1ParTask : public AbstractGangTask { | |
3851 protected: | |
3852 G1CollectedHeap* _g1h; | |
3853 RefToScanQueueSet *_queues; | |
3854 ParallelTaskTerminator _terminator; | |
845 | 3855 int _n_workers;
342 | 3856 |
3857 Mutex _stats_lock; | |
3858 Mutex* stats_lock() { return &_stats_lock; } | |
3859 | |
3860 size_t getNCards() { | |
3861 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
3862 / G1BlockOffsetSharedArray::N_bytes; | |
3863 } | |
3864 | |
3865 public: | |
3866 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
3867 : AbstractGangTask("G1 collection"), | |
3868 _g1h(g1h), | |
3869 _queues(task_queues), | |
3870 _terminator(workers, _queues), | |
845 | 3871 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
3872 _n_workers(workers) |
342 | 3873 {} |
3874 | |
3875 RefToScanQueueSet* queues() { return _queues; } | |
3876 | |
3877 RefToScanQueue *work_queue(int i) { | |
3878 return queues()->queue(i); | |
3879 } | |
3880 | |
3881 void work(int i) { | |
845 | 3882 if (i >= _n_workers) return; // no work needed this round |
342 | 3883 ResourceMark rm; |
3884 HandleMark hm; | |
3885 | |
526 | 3886 G1ParScanThreadState pss(_g1h, i); |
3887 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
3888 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); | |
3889 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
342 | 3890 |
3891 pss.set_evac_closure(&scan_evac_cl); | |
3892 pss.set_evac_failure_closure(&evac_failure_cl); | |
3893 pss.set_partial_scan_closure(&partial_scan_cl); | |
3894 | |
3895 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
3896 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
3897 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
616 | 3898 |
342 | 3899 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
3900 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
3901 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
3902 | |
3903 OopsInHeapRegionClosure *scan_root_cl; | |
3904 OopsInHeapRegionClosure *scan_perm_cl; | |
3905 OopsInHeapRegionClosure *scan_so_cl; | |
3906 | |
3907 if (_g1h->g1_policy()->should_initiate_conc_mark()) { | |
3908 scan_root_cl = &scan_mark_root_cl; | |
3909 scan_perm_cl = &scan_mark_perm_cl; | |
3910 scan_so_cl = &scan_mark_heap_rs_cl; | |
3911 } else { | |
3912 scan_root_cl = &only_scan_root_cl; | |
3913 scan_perm_cl = &only_scan_perm_cl; | |
3914 scan_so_cl = &only_scan_heap_rs_cl; | |
3915 } | |
3916 | |
3917 pss.start_strong_roots(); | |
3918 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
3919 SharedHeap::SO_AllClasses, | |
3920 scan_root_cl, | |
3921 &only_scan_heap_rs_cl, | |
3922 scan_so_cl, | |
3923 scan_perm_cl, | |
3924 i); | |
3925 pss.end_strong_roots(); | |
3926 { | |
3927 double start = os::elapsedTime(); | |
3928 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
3929 evac.do_void(); | |
3930 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
3931 double term_ms = pss.term_time()*1000.0; | |
3932 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
3933 _g1h->g1_policy()->record_termination_time(i, term_ms); | |
3934 } | |
751 | 3935 if (G1UseSurvivorSpaces) { |
545 | 3936 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
3937 } | |
342 | 3938 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
3939 | |
3940 // Clean up any par-expanded rem sets. | |
3941 HeapRegionRemSet::par_cleanup(); | |
3942 | |
3943 MutexLocker x(stats_lock()); | |
3944 if (ParallelGCVerbose) { | |
3945 gclog_or_tty->print("Thread %d complete:\n", i); | |
3946 #if G1_DETAILED_STATS | |
3947 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", | |
3948 pss.pushes(), | |
3949 pss.pops(), | |
3950 pss.overflow_pushes(), | |
3951 pss.steals(), | |
3952 pss.steal_attempts()); | |
3953 #endif | |
3954 double elapsed = pss.elapsed(); | |
3955 double strong_roots = pss.strong_roots_time(); | |
3956 double term = pss.term_time(); | |
3957 gclog_or_tty->print(" Elapsed: %7.2f ms.\n" | |
3958 " Strong roots: %7.2f ms (%6.2f%%)\n" | |
3959 " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", | |
3960 elapsed * 1000.0, | |
3961 strong_roots * 1000.0, (strong_roots*100.0/elapsed), | |
3962 term * 1000.0, (term*100.0/elapsed), | |
3963 pss.term_attempts()); | |
3964 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); | |
3965 gclog_or_tty->print(" Waste: %8dK\n" | |
3966 " Alloc Buffer: %8dK\n" | |
3967 " Undo: %8dK\n", | |
3968 (total_waste * HeapWordSize) / K, | |
3969 (pss.alloc_buffer_waste() * HeapWordSize) / K, | |
3970 (pss.undo_waste() * HeapWordSize) / K); | |
3971 } | |
3972 | |
3973 assert(pss.refs_to_scan() == 0, "Task queue should be empty"); | |
3974 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); | |
3975 } | |
3976 }; | |
3977 | |
3978 // *** Common G1 Evacuation Stuff | |
3979 | |
3980 void | |
3981 G1CollectedHeap:: | |
3982 g1_process_strong_roots(bool collecting_perm_gen, | |
3983 SharedHeap::ScanningOption so, | |
3984 OopClosure* scan_non_heap_roots, | |
3985 OopsInHeapRegionClosure* scan_rs, | |
3986 OopsInHeapRegionClosure* scan_so, | |
3987 OopsInGenClosure* scan_perm, | |
3988 int worker_i) { | |
3989 // First scan the strong roots, including the perm gen. | |
3990 double ext_roots_start = os::elapsedTime(); | |
3991 double closure_app_time_sec = 0.0; | |
3992 | |
3993 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
3994 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
3995 buf_scan_perm.set_generation(perm_gen()); | |
3996 | |
3997 process_strong_roots(collecting_perm_gen, so, | |
3998 &buf_scan_non_heap_roots, | |
3999 &buf_scan_perm); | |
4000 // Finish up any enqueued closure apps. | |
4001 buf_scan_non_heap_roots.done(); | |
4002 buf_scan_perm.done(); | |
4003 double ext_roots_end = os::elapsedTime(); | |
4004 g1_policy()->reset_obj_copy_time(worker_i); | |
4005 double obj_copy_time_sec = | |
4006 buf_scan_non_heap_roots.closure_app_seconds() + | |
4007 buf_scan_perm.closure_app_seconds(); | |
4008 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4009 double ext_root_time_ms = | |
4010 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4011 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4012 | |
4013 // Scan strong roots in mark stack. | |
4014 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4015 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4016 } | |
4017 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4018 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4019 | |
4020 // XXX What should this be doing in the parallel case? | |
4021 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4022 if (scan_so != NULL) { | |
4023 scan_scan_only_set(scan_so, worker_i); | |
4024 } | |
4025 // Now scan the complement of the collection set. | |
4026 if (scan_rs != NULL) { | |
4027 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4028 } | |
4029 // Finish with the ref_processor roots. | |
4030 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
4031 ref_processor()->oops_do(scan_non_heap_roots); | |
4032 } | |
4033 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4034 _process_strong_tasks->all_tasks_completed(); | |
4035 } | |
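// ---------------------------------------------------------------------
// Editor's sketch: the accounting above is subtractive. The wall-clock
// interval around root processing includes the object copying triggered
// from the buffering closures, so that copying time is measured
// separately and subtracted to obtain the pure external-root scan time.
// All numbers below are made up.
#include <cstdio>

int main() {
  double ext_roots_start = 0.000;  // os::elapsedTime() at entry, seconds
  double ext_roots_end   = 0.050;  // after process_strong_roots and done()
  double obj_copy_sec    = 0.030;  // closure_app_seconds() of both buffers
  double ext_root_time_ms =
      ((ext_roots_end - ext_roots_start) - obj_copy_sec) * 1000.0;
  std::printf("obj copy: %.1f ms, ext root scan: %.1f ms\n",
              obj_copy_sec * 1000.0, ext_root_time_ms);  // 30.0 and 20.0
  return 0;
}
// ---------------------------------------------------------------------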
4036 | |
4037 void | |
4038 G1CollectedHeap::scan_scan_only_region(HeapRegion* r, | |
4039 OopsInHeapRegionClosure* oc, | |
4040 int worker_i) { | |
4041 HeapWord* startAddr = r->bottom(); | |
4042 HeapWord* endAddr = r->used_region().end(); | |
4043 | |
4044 oc->set_region(r); | |
4045 | |
4046 HeapWord* p = r->bottom(); | |
4047 HeapWord* t = r->top(); | |
4048 guarantee( p == r->next_top_at_mark_start(), "invariant" ); | |
4049 while (p < t) { | |
4050 oop obj = oop(p); | |
4051 p += obj->oop_iterate(oc); | |
4052 } | |
4053 } | |
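// ---------------------------------------------------------------------
// Editor's sketch: the while loop above walks a region object by object.
// oop_iterate() applies the closure to an object's fields and returns the
// object's size in words, which advances the cursor. The flat array of
// size-prefixed records below is a stand-in heap, not the real layout.
#include <cstdio>

int main() {
  int heap[] = {3, 7, 7,        // "object" of 3 words
                2, 9,           // "object" of 2 words
                4, 8, 8, 8};    // "object" of 4 words
  int* p = heap;                                    // r->bottom()
  int* t = heap + sizeof(heap) / sizeof(heap[0]);   // r->top()
  while (p < t) {
    int size_in_words = *p;     // what obj->oop_iterate(oc) would return
    std::printf("object of %d words at offset %td\n",
                size_in_words, p - heap);
    p += size_in_words;         // advance to the next object
  }
  return 0;
}
// ---------------------------------------------------------------------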
4054 | |
4055 void | |
4056 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc, | |
4057 int worker_i) { | |
4058 double start = os::elapsedTime(); | |
4059 | |
4060 BufferingOopsInHeapRegionClosure boc(oc); | |
4061 | |
4062 FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc); | |
4063 FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark()); | |
4064 | |
4065 OopsInHeapRegionClosure *foc; | |
4066 if (g1_policy()->should_initiate_conc_mark()) | |
4067 foc = &scan_and_mark; | |
4068 else | |
4069 foc = &scan_only; | |
4070 | |
4071 HeapRegion* hr; | |
4072 int n = 0; | |
4073 while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) { | |
4074 scan_scan_only_region(hr, foc, worker_i); | |
4075 ++n; | |
4076 } | |
4077 boc.done(); | |
4078 | |
4079 double closure_app_s = boc.closure_app_seconds(); | |
4080 g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0); | |
4081 double ms = (os::elapsedTime() - start - closure_app_s)*1000.0; | |
4082 g1_policy()->record_scan_only_time(worker_i, ms, n); | |
4083 } | |
4084 | |
4085 void | |
4086 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4087 OopClosure* non_root_closure) { | |
4088 SharedHeap::process_weak_roots(root_closure, non_root_closure); | |
4089 } | |
4090 | |
4091 | |
4092 class SaveMarksClosure: public HeapRegionClosure { | |
4093 public: | |
4094 bool doHeapRegion(HeapRegion* r) { | |
4095 r->save_marks(); | |
4096 return false; | |
4097 } | |
4098 }; | |
4099 | |
4100 void G1CollectedHeap::save_marks() { | |
4101 if (ParallelGCThreads == 0) { | |
4102 SaveMarksClosure sm; | |
4103 heap_region_iterate(&sm); | |
4104 } | |
4105 // We do this even in the parallel case | |
4106 perm_gen()->save_marks(); | |
4107 } | |
4108 | |
4109 void G1CollectedHeap::evacuate_collection_set() { | |
4110 set_evacuation_failed(false); | |
4111 | |
4112 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4113 concurrent_g1_refine()->set_use_cache(false); | |
889 | 4114 concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
4115 | |
342 | 4116 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
4117 set_par_threads(n_workers); | |
4118 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4119 | |
4120 init_for_evac_failure(NULL); | |
4121 | |
4122 change_strong_roots_parity(); // In preparation for parallel strong roots. | |
4123 rem_set()->prepare_for_younger_refs_iterate(true); | |
616 | 4124 |
4125 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
342 | 4126 double start_par = os::elapsedTime(); |
4127 if (ParallelGCThreads > 0) { | |
4128 // The individual threads will set their evac-failure closures. | |
4129 workers()->run_task(&g1_par_task); | |
4130 } else { | |
4131 g1_par_task.work(0); | |
4132 } | |
4133 | |
4134 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4135 g1_policy()->record_par_time(par_time); | |
4136 set_par_threads(0); | |
4137 // Is this the right thing to do here? We don't save marks | |
4138 // on individual heap regions when we allocate from | |
4139 // them in parallel, so this seems like the right place for it. |
545 | 4140 retire_all_alloc_regions(); |
342 | 4141 { |
4142 G1IsAliveClosure is_alive(this); | |
4143 G1KeepAliveClosure keep_alive(this); | |
4144 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4145 } | |
4146 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); | |
616 | 4147 |
889 | 4148 concurrent_g1_refine()->clear_hot_cache(); |
342 | 4149 concurrent_g1_refine()->set_use_cache(true); |
4150 | |
4151 finalize_for_evac_failure(); | |
4152 | |
4153 // Must do this before removing self-forwarding pointers, which clears | |
4154 // the per-region evac-failure flags. | |
4155 concurrent_mark()->complete_marking_in_collection_set(); | |
4156 | |
4157 if (evacuation_failed()) { | |
4158 remove_self_forwarding_pointers(); | |
4159 if (PrintGCDetails) { | |
4160 gclog_or_tty->print(" (evacuation failed)"); | |
4161 } else if (PrintGC) { | |
4162 gclog_or_tty->print("--"); | |
4163 } | |
4164 } | |
4165 | |
616 | 4166 if (G1DeferredRSUpdate) { |
4167 RedirtyLoggedCardTableEntryFastClosure redirty; |
4168 dirty_card_queue_set().set_closure(&redirty); |
4169 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
4170 JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set()); |
4171 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
4172 } |
4173 |
342 | 4174 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
4175 } | |
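// ---------------------------------------------------------------------
// Editor's sketch: lines 4114 and 4148 bracket the pause with the hot
// card cache. Resetting the claimed index before the workers run points
// at a claim-by-chunks scheme in which each GC worker atomically
// advances a shared index to take the next chunk of cards. The names
// and sizes below are hypothetical, not the ConcurrentG1Refine
// interface.
#include <algorithm>
#include <atomic>
#include <cstdio>

static const int cache_size = 1000;          // entries in the card cache
static const int chunk_size = 256;           // per-claim work unit
static std::atomic<int> claimed_index(0);    // reset before each pause

void drain_chunks(int worker_id) {
  for (;;) {
    int start = claimed_index.fetch_add(chunk_size);  // claim one chunk
    if (start >= cache_size) return;                  // nothing left
    int end = std::min(start + chunk_size, cache_size);
    std::printf("worker %d drains cards [%d, %d)\n", worker_id, start, end);
  }
}

int main() {
  claimed_index = 0;                 // like clear_hot_cache_claimed_index()
  for (int i = 0; i < 4; ++i) {
    drain_chunks(i);                 // run_task() would do this concurrently
  }
  return 0;
}
// ---------------------------------------------------------------------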
4176 | |
4177 void G1CollectedHeap::free_region(HeapRegion* hr) { | |
4178 size_t pre_used = 0; | |
4179 size_t cleared_h_regions = 0; | |
4180 size_t freed_regions = 0; | |
4181 UncleanRegionList local_list; | |
4182 | |
4183 HeapWord* start = hr->bottom(); | |
4184 HeapWord* end = hr->prev_top_at_mark_start(); | |
4185 size_t used_bytes = hr->used(); | |
4186 size_t live_bytes = hr->max_live_bytes(); | |
4187 if (used_bytes > 0) { | |
4188 guarantee( live_bytes <= used_bytes, "invariant" ); | |
4189 } else { | |
4190 guarantee( live_bytes == 0, "invariant" ); | |
4191 } | |
4192 | |
4193 size_t garbage_bytes = used_bytes - live_bytes; | |
4194 if (garbage_bytes > 0) | |
4195 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); | |
4196 | |
4197 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, | |
4198 &local_list); | |
4199 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
4200 &local_list); | |
4201 } | |
4202 | |
4203 void | |
4204 G1CollectedHeap::free_region_work(HeapRegion* hr, | |
4205 size_t& pre_used, | |
4206 size_t& cleared_h_regions, | |
4207 size_t& freed_regions, | |
4208 UncleanRegionList* list, | |
4209 bool par) { | |
4210 pre_used += hr->used(); | |
4211 if (hr->isHumongous()) { | |
4212 assert(hr->startsHumongous(), | |
4213 "Only the start of a humongous region should be freed."); | |
4214 int ind = _hrs->find(hr); | |
4215 assert(ind != -1, "Should have an index."); | |
4216 // Clear the start region. | |
4217 hr->hr_clear(par, true /*clear_space*/); | |
4218 list->insert_before_head(hr); | |
4219 cleared_h_regions++; | |
4220 freed_regions++; | |
4221 // Clear any continued regions. | |
4222 ind++; | |
4223 while ((size_t)ind < n_regions()) { | |
4224 HeapRegion* hrc = _hrs->at(ind); | |
4225 if (!hrc->continuesHumongous()) break; | |
4226 // Otherwise, does continue the H region. | |
4227 assert(hrc->humongous_start_region() == hr, "Huh?"); | |
4228 hrc->hr_clear(par, true /*clear_space*/); | |
4229 cleared_h_regions++; | |
4230 freed_regions++; | |
4231 list->insert_before_head(hrc); | |
4232 ind++; | |
4233 } | |
4234 } else { | |
4235 hr->hr_clear(par, true /*clear_space*/); | |
4236 list->insert_before_head(hr); | |
4237 freed_regions++; | |
4238 // If we're using clear2, this should not be enabled. | |
4239 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); | |
4240 } | |
4241 } | |
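// ---------------------------------------------------------------------
// Editor's sketch: the humongous branch above frees a multi-region
// object by clearing its start region, then scanning forward over the
// region table and clearing each region that continues the same object,
// stopping at the first one that does not. Kind and the vector below
// are stand-ins for the HeapRegion sequence.
#include <cstdio>
#include <vector>

enum Kind { Normal, HumStart, HumCont };

int main() {
  std::vector<Kind> regions = {Normal, HumStart, HumCont, HumCont, Normal};
  int ind = 1;                                   // _hrs->find(hr)
  std::printf("clear start region %d\n", ind);   // hr->hr_clear(par, true)
  for (++ind; ind < (int)regions.size(); ++ind) {
    if (regions[ind] != HumCont) break;          // end of the humongous object
    std::printf("clear continuation region %d\n", ind);
  }
  return 0;
}
// ---------------------------------------------------------------------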
4242 | |
4243 void G1CollectedHeap::finish_free_region_work(size_t pre_used, | |
4244 size_t cleared_h_regions, | |
4245 size_t freed_regions, | |
4246 UncleanRegionList* list) { | |
4247 if (list != NULL && list->sz() > 0) { | |
4248 prepend_region_list_on_unclean_list(list); | |
4249 } | |
4250 // Acquire a lock, if we're parallel, to update possibly-shared | |
4251 // variables. | |
4252 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; | |
4253 { | |
4254 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
4255 _summary_bytes_used -= pre_used; | |
4256 _num_humongous_regions -= (int) cleared_h_regions; | |
4257 _free_regions += freed_regions; | |
4258 } | |
4259 } | |
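// ---------------------------------------------------------------------
// Editor's sketch: the locking above is conditional. When no parallel
// workers are active the lock pointer is NULL and MutexLockerEx
// degenerates to a no-op, so the serial path pays nothing. MaybeLocker
// below is a stand-in for that idiom, not the HotSpot class.
#include <cstdio>
#include <mutex>

struct MaybeLocker {
  std::mutex* _m;
  explicit MaybeLocker(std::mutex* m) : _m(m) { if (_m != nullptr) _m->lock(); }
  ~MaybeLocker() { if (_m != nullptr) _m->unlock(); }
};

std::mutex rare_event_lock;      // stands in for ParGCRareEvent_lock
long summary_bytes_used = 1000;  // possibly-shared counter

void finish_free_work(bool parallel, long pre_used) {
  std::mutex* lock = parallel ? &rare_event_lock : nullptr;
  MaybeLocker x(lock);           // locks only when workers might race
  summary_bytes_used -= pre_used;
}

int main() {
  finish_free_work(false, 100);  // serial: no locking
  finish_free_work(true, 100);   // parallel: guarded update
  std::printf("used: %ld\n", summary_bytes_used);  // prints 800
  return 0;
}
// ---------------------------------------------------------------------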
4260 | |
4261 | |
4262 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4263 while (list != NULL) { | |
4264 guarantee( list->is_young(), "invariant" ); | |
4265 | |
4266 HeapWord* bottom = list->bottom(); | |
4267 HeapWord* end = list->end(); | |
4268 MemRegion mr(bottom, end); | |
4269 ct_bs->dirty(mr); | |
4270 | |
4271 list = list->get_next_young_region(); | |
4272 } | |
4273 } | |
4274 | |
796 | 4275 |
4276 class G1ParCleanupCTTask : public AbstractGangTask { |
4277 CardTableModRefBS* _ct_bs; |
4278 G1CollectedHeap* _g1h; |
4279 public: |
4280 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
4281 G1CollectedHeap* g1h) : |
4282 AbstractGangTask("G1 Par Cleanup CT Task"), |
4283 _ct_bs(ct_bs), |
4284 _g1h(g1h) |
4285 { } |
4286 |
4287 void work(int i) { |
4288 HeapRegion* r; |
4289 while ((r = _g1h->pop_dirty_cards_region()) != NULL) { |
4290 clear_cards(r); |
4291 } |
4292 } |
4293 void clear_cards(HeapRegion* r) { |
4294 // Cards for Survivor and Scan-Only regions will be dirtied later. |
4295 if (!r->is_scan_only() && !r->is_survivor()) { |
4296 _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
4297 } |
4298 } |
4299 }; |
4300 |
4301 |
342 | 4302 void G1CollectedHeap::cleanUpCardTable() { |
4303 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
4304 double start = os::elapsedTime(); | |
4305 | |
796 | 4306 // Iterate over the dirty cards region list. |
4307 G1ParCleanupCTTask cleanup_task(ct_bs, this); |
4308 if (ParallelGCThreads > 0) { |
4309 set_par_threads(workers()->total_workers()); |
4310 workers()->run_task(&cleanup_task); |
4311 set_par_threads(0); |
4312 } else { |
4313 while (_dirty_cards_region_list) { |
4314 HeapRegion* r = _dirty_cards_region_list; |
4315 cleanup_task.clear_cards(r); |
4316 _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
4317 if (_dirty_cards_region_list == r) { |
4318 // The last region. |
4319 _dirty_cards_region_list = NULL; |
4320 } |
4321 r->set_next_dirty_cards_region(NULL); |
4322 } |
4323 } |
342 | 4324 // now, redirty the cards of the scan-only and survivor regions |
4325 // (it seemed faster to do it this way, instead of iterating over | |
796 | 4326 // all regions and then clearing / dirtying as appropriate) |
342 | 4327 dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region()); |
4328 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); | |
4329 | |
4330 double elapsed = os::elapsedTime() - start; | |
4331 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
4332 } | |
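// ---------------------------------------------------------------------
// Editor's sketch: the serial branch above reveals the dirty-cards
// list's termination convention: the last region's next pointer refers
// to the region itself, so next == current marks the tail. Region below
// is a stand-in for HeapRegion.
#include <cstdio>

struct Region {
  int id;
  Region* next;
};

int main() {
  Region c = {3, nullptr};
  c.next = &c;                       // last region links to itself
  Region b = {2, &c};
  Region a = {1, &b};
  Region* list = &a;                 // _dirty_cards_region_list
  while (list != nullptr) {
    Region* r = list;
    std::printf("clear cards of region %d\n", r->id);
    list = r->next;                  // get_next_dirty_cards_region()
    if (list == r) {
      list = nullptr;                // the last region
    }
    r->next = nullptr;               // set_next_dirty_cards_region(NULL)
  }
  return 0;
}
// ---------------------------------------------------------------------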
4333 | |
4334 | |
4335 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { | |
4336 if (g1_policy()->should_do_collection_pause(word_size)) { | |
4337 do_collection_pause(); | |
4338 } | |
4339 } | |
4340 | |
4341 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
4342 double young_time_ms = 0.0; | |
4343 double non_young_time_ms = 0.0; | |
4344 | |
4345 G1CollectorPolicy* policy = g1_policy(); | |
4346 | |
4347 double start_sec = os::elapsedTime(); | |
4348 bool non_young = true; | |
4349 | |
4350 HeapRegion* cur = cs_head; | |
4351 int age_bound = -1; | |
4352 size_t rs_lengths = 0; | |
4353 | |
4354 while (cur != NULL) { | |
4355 if (non_young) { | |
4356 if (cur->is_young()) { | |
4357 double end_sec = os::elapsedTime(); | |
4358 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4359 non_young_time_ms += elapsed_ms; | |
4360 | |
4361 start_sec = os::elapsedTime(); | |
4362 non_young = false; | |
4363 } | |
4364 } else { | |
4365 if (!cur->is_on_free_list()) { | |
4366 double end_sec = os::elapsedTime(); | |
4367 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4368 young_time_ms += elapsed_ms; | |
4369 | |
4370 start_sec = os::elapsedTime(); | |
4371 non_young = true; | |
4372 } | |
4373 } | |
4374 | |
4375 rs_lengths += cur->rem_set()->occupied(); | |
4376 | |
4377 HeapRegion* next = cur->next_in_collection_set(); | |
4378 assert(cur->in_collection_set(), "bad CS"); | |
4379 cur->set_next_in_collection_set(NULL); | |
4380 cur->set_in_collection_set(false); | |
4381 | |
4382 if (cur->is_young()) { | |
4383 int index = cur->young_index_in_cset(); | |
4384 guarantee( index != -1, "invariant" ); | |
4385 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
4386 size_t words_survived = _surviving_young_words[index]; | |
4387 cur->record_surv_words_in_group(words_survived); | |
4388 } else { | |
4389 int index = cur->young_index_in_cset(); | |
4390 guarantee( index == -1, "invariant" ); | |
4391 } | |
4392 | |
4393 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
4394 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
4395 "invariant" ); | |
4396 | |
4397 if (!cur->evacuation_failed()) { | |
4398 // The region can now be freed (regions in a CS are never empty). |
4399 assert(!cur->is_empty(), | |
4400 "Should not have empty regions in a CS."); | |
4401 free_region(cur); | |
4402 } else { | |
4403 guarantee( !cur->is_scan_only(), "should not be scan only" ); | |
4404 cur->uninstall_surv_rate_group(); | |
4405 if (cur->is_young()) | |
4406 cur->set_young_index_in_cset(-1); | |
4407 cur->set_not_young(); | |
4408 cur->set_evacuation_failed(false); | |
4409 } | |
4410 cur = next; | |
4411 } | |
4412 | |
4413 policy->record_max_rs_lengths(rs_lengths); | |
4414 policy->cset_regions_freed(); | |
4415 | |
4416 double end_sec = os::elapsedTime(); | |
4417 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4418 if (non_young) | |
4419 non_young_time_ms += elapsed_ms; | |
4420 else | |
4421 young_time_ms += elapsed_ms; | |
4422 | |
4423 policy->record_young_free_cset_time_ms(young_time_ms); | |
4424 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
4425 } | |
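// ---------------------------------------------------------------------
// Editor's sketch: the loop above splits its wall-clock cost into young
// and non-young buckets by restarting a stopwatch whenever the traversal
// crosses between non-young and young runs of regions, then banking the
// final segment after the loop. The fake clock below charges 1 ms per
// region; everything here is a stand-in.
#include <cstdio>

int main() {
  bool region_is_young[] = {false, false, true, true, false};
  double clock_ms = 0.0, start = 0.0;
  double young_ms = 0.0, non_young_ms = 0.0;
  bool non_young = true;                        // as in the code above
  for (bool is_young : region_is_young) {
    if (non_young == is_young) {                // crossed a segment boundary
      (non_young ? non_young_ms : young_ms) += clock_ms - start;
      start = clock_ms;
      non_young = !non_young;
    }
    clock_ms += 1.0;                            // "process" the region
  }
  (non_young ? non_young_ms : young_ms) += clock_ms - start;  // last segment
  std::printf("young %.0f ms, non-young %.0f ms\n", young_ms, non_young_ms);
  return 0;                                     // young 2 ms, non-young 3 ms
}
// ---------------------------------------------------------------------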
4426 | |
4427 HeapRegion* | |
4428 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { | |
4429 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4430 HeapRegion* res = pop_unclean_region_list_locked(); | |
4431 if (res != NULL) { | |
4432 assert(!res->continuesHumongous() && | |
4433 res->zero_fill_state() != HeapRegion::Allocated, | |
4434 "Only free regions on unclean list."); | |
4435 if (zero_filled) { | |
4436 res->ensure_zero_filled_locked(); | |
4437 res->set_zero_fill_allocated(); | |
4438 } | |
4439 } | |
4440 return res; | |
4441 } | |
4442 | |
4443 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { | |
4444 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4445 return alloc_region_from_unclean_list_locked(zero_filled); | |
4446 } | |
4447 | |
4448 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { | |
4449 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4450 put_region_on_unclean_list_locked(r); | |
4451 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4452 } | |
4453 | |
4454 void G1CollectedHeap::set_unclean_regions_coming(bool b) { | |
4455 MutexLockerEx x(Cleanup_mon); | |
4456 set_unclean_regions_coming_locked(b); | |
4457 } | |
4458 | |
4459 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { | |
4460 assert(Cleanup_mon->owned_by_self(), "Precondition"); | |
4461 _unclean_regions_coming = b; | |
4462 // Wake up mutator threads that might be waiting for completeCleanup to | |
4463 // finish. | |
4464 if (!b) Cleanup_mon->notify_all(); | |
4465 } | |
4466 | |
4467 void G1CollectedHeap::wait_for_cleanup_complete() { | |
4468 MutexLockerEx x(Cleanup_mon); | |
4469 wait_for_cleanup_complete_locked(); | |
4470 } | |
4471 | |
4472 void G1CollectedHeap::wait_for_cleanup_complete_locked() { | |
4473 assert(Cleanup_mon->owned_by_self(), "precondition"); | |
4474 while (_unclean_regions_coming) { | |
4475 Cleanup_mon->wait(); | |
4476 } | |
4477 } | |
4478 | |
4479 void | |
4480 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { | |
4481 assert(ZF_mon->owned_by_self(), "precondition."); | |
4482 _unclean_region_list.insert_before_head(r); | |
4483 } | |
4484 | |
4485 void | |
4486 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { | |
4487 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4488 prepend_region_list_on_unclean_list_locked(list); | |
4489 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4490 } | |
4491 | |
4492 void | |
4493 G1CollectedHeap:: | |
4494 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { | |
4495 assert(ZF_mon->owned_by_self(), "precondition."); | |
4496 _unclean_region_list.prepend_list(list); | |
4497 } | |
4498 | |
4499 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { | |
4500 assert(ZF_mon->owned_by_self(), "precondition."); | |
4501 HeapRegion* res = _unclean_region_list.pop(); | |
4502 if (res != NULL) { | |
4503 // Inform ZF thread that there's a new unclean head. | |
4504 if (_unclean_region_list.hd() != NULL && should_zf()) | |
4505 ZF_mon->notify_all(); | |
4506 } | |
4507 return res; | |
4508 } | |
4509 | |
4510 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { | |
4511 assert(ZF_mon->owned_by_self(), "precondition."); | |
4512 return _unclean_region_list.hd(); | |
4513 } | |
4514 | |
4515 | |
4516 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { | |
4517 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4518 HeapRegion* r = peek_unclean_region_list_locked(); | |
4519 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { | |
4520 // Result of below must be equal to "r", since we hold the lock. | |
4521 (void)pop_unclean_region_list_locked(); | |
4522 put_free_region_on_list_locked(r); | |
4523 return true; | |
4524 } else { | |
4525 return false; | |
4526 } | |
4527 } | |
4528 | |
4529 bool G1CollectedHeap::move_cleaned_region_to_free_list() { | |
4530 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4531 return move_cleaned_region_to_free_list_locked(); | |
4532 } | |
4533 | |
4534 | |
4535 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { | |
4536 assert(ZF_mon->owned_by_self(), "precondition."); | |
4537 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4538 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, | |
4539 "Regions on free list must be zero filled"); | |
4540 assert(!r->isHumongous(), "Must not be humongous."); | |
4541 assert(r->is_empty(), "Better be empty"); | |
4542 assert(!r->is_on_free_list(), | |
4543 "Better not already be on free list"); | |
4544 assert(!r->is_on_unclean_list(), | |
4545 "Better not already be on unclean list"); | |
4546 r->set_on_free_list(true); | |
4547 r->set_next_on_free_list(_free_region_list); | |
4548 _free_region_list = r; | |
4549 _free_region_list_size++; | |
4550 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4551 } | |
4552 | |
4553 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { | |
4554 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4555 put_free_region_on_list_locked(r); | |
4556 } | |
4557 | |
4558 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { | |
4559 assert(ZF_mon->owned_by_self(), "precondition."); | |
4560 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4561 HeapRegion* res = _free_region_list; | |
4562 if (res != NULL) { | |
4563 _free_region_list = res->next_from_free_list(); | |
4564 _free_region_list_size--; | |
4565 res->set_on_free_list(false); | |
4566 res->set_next_on_free_list(NULL); | |
4567 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4568 } | |
4569 return res; | |
4570 } | |
4571 | |
4572 | |
4573 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { | |
4574 // By self, or on behalf of self. | |
4575 assert(Heap_lock->is_locked(), "Precondition"); | |
4576 HeapRegion* res = NULL; | |
4577 bool first = true; | |
4578 while (res == NULL) { | |
4579 if (zero_filled || !first) { | |
4580 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4581 res = pop_free_region_list_locked(); | |
4582 if (res != NULL) { | |
4583 assert(!res->zero_fill_is_allocated(), | |
4584 "No allocated regions on free list."); | |
4585 res->set_zero_fill_allocated(); | |
4586 } else if (!first) { | |
4587 break; // We tried both, time to return NULL. | |
4588 } | |
4589 } | |
4590 | |
4591 if (res == NULL) { | |
4592 res = alloc_region_from_unclean_list(zero_filled); | |
4593 } | |
4594 assert(res == NULL || | |
4595 !zero_filled || | |
4596 res->zero_fill_is_allocated(), | |
4597 "We must have allocated the region we're returning"); | |
4598 first = false; | |
4599 } | |
4600 return res; | |
4601 } | |
4602 | |
4603 void G1CollectedHeap::remove_allocated_regions_from_lists() { | |
4604 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4605 { | |
4606 HeapRegion* prev = NULL; | |
4607 HeapRegion* cur = _unclean_region_list.hd(); | |
4608 while (cur != NULL) { | |
4609 HeapRegion* next = cur->next_from_unclean_list(); | |
4610 if (cur->zero_fill_is_allocated()) { | |
4611 // Remove from the list. | |
4612 if (prev == NULL) { | |
4613 (void)_unclean_region_list.pop(); | |
4614 } else { | |
4615 _unclean_region_list.delete_after(prev); | |
4616 } | |
4617 cur->set_on_unclean_list(false); | |
4618 cur->set_next_on_unclean_list(NULL); | |
4619 } else { | |
4620 prev = cur; | |
4621 } | |
4622 cur = next; | |
4623 } | |
4624 assert(_unclean_region_list.sz() == unclean_region_list_length(), | |
4625 "Inv"); | |
4626 } | |
4627 | |
4628 { | |
4629 HeapRegion* prev = NULL; | |
4630 HeapRegion* cur = _free_region_list; | |
4631 while (cur != NULL) { | |
4632 HeapRegion* next = cur->next_from_free_list(); | |
4633 if (cur->zero_fill_is_allocated()) { | |
4634 // Remove from the list. | |
4635 if (prev == NULL) { | |
4636 _free_region_list = cur->next_from_free_list(); | |
4637 } else { | |
4638 prev->set_next_on_free_list(cur->next_from_free_list()); | |
4639 } | |
4640 cur->set_on_free_list(false); | |
4641 cur->set_next_on_free_list(NULL); | |
4642 _free_region_list_size--; | |
4643 } else { | |
4644 prev = cur; | |
4645 } | |
4646 cur = next; | |
4647 } | |
4648 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4649 } | |
4650 } | |
4651 | |
4652 bool G1CollectedHeap::verify_region_lists() { | |
4653 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4654 return verify_region_lists_locked(); | |
4655 } | |
4656 | |
4657 bool G1CollectedHeap::verify_region_lists_locked() { | |
4658 HeapRegion* unclean = _unclean_region_list.hd(); | |
4659 while (unclean != NULL) { | |
4660 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); | |
4661 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); | |
4662 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, | |
4663 "Everything else is possible."); | |
4664 unclean = unclean->next_from_unclean_list(); | |
4665 } | |
4666 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); | |
4667 | |
4668 HeapRegion* free_r = _free_region_list; | |
4669 while (free_r != NULL) { | |
4670 assert(free_r->is_on_free_list(), "Well, it is!"); | |
4671 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); | |
4672 switch (free_r->zero_fill_state()) { | |
4673 case HeapRegion::NotZeroFilled: | |
4674 case HeapRegion::ZeroFilling: | |
4675 guarantee(false, "Should not be on free list."); | |
4676 break; | |
4677 default: | |
4678 // Everything else is possible. | |
4679 break; | |
4680 } | |
4681 free_r = free_r->next_from_free_list(); | |
4682 } | |
4683 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); | |
4684 // If we didn't do an assertion... | |
4685 return true; | |
4686 } | |
4687 | |
4688 size_t G1CollectedHeap::free_region_list_length() { | |
4689 assert(ZF_mon->owned_by_self(), "precondition."); | |
4690 size_t len = 0; | |
4691 HeapRegion* cur = _free_region_list; | |
4692 while (cur != NULL) { | |
4693 len++; | |
4694 cur = cur->next_from_free_list(); | |
4695 } | |
4696 return len; | |
4697 } | |
4698 | |
4699 size_t G1CollectedHeap::unclean_region_list_length() { | |
4700 assert(ZF_mon->owned_by_self(), "precondition."); | |
4701 return _unclean_region_list.length(); | |
4702 } | |
4703 | |
4704 size_t G1CollectedHeap::n_regions() { | |
4705 return _hrs->length(); | |
4706 } | |
4707 | |
4708 size_t G1CollectedHeap::max_regions() { | |
4709 return | |
4710 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / | |
4711 HeapRegion::GrainBytes; | |
4712 } | |
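// ---------------------------------------------------------------------
// Editor's sketch: max_regions() is a round-up division. The reserved
// byte count is aligned up to a whole number of region grains before
// dividing. The 1 MB grain size below is made up.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t grain_bytes = 1024 * 1024;   // pretend HeapRegion::GrainBytes
  size_t reserved = 10 * grain_bytes + 1;   // just over 10 regions' worth
  // align_size_up for a power-of-two alignment:
  size_t aligned = (reserved + grain_bytes - 1) & ~(grain_bytes - 1);
  std::printf("max regions = %zu\n", aligned / grain_bytes);  // prints 11
  return 0;
}
// ---------------------------------------------------------------------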
4713 | |
4714 size_t G1CollectedHeap::free_regions() { | |
4715 /* Possibly-expensive assert. | |
4716 assert(_free_regions == count_free_regions(), | |
4717 "_free_regions is off."); | |
4718 */ | |
4719 return _free_regions; | |
4720 } | |
4721 | |
4722 bool G1CollectedHeap::should_zf() { | |
4723 return _free_region_list_size < (size_t) G1ConcZFMaxRegions; | |
4724 } | |
4725 | |
4726 class RegionCounter: public HeapRegionClosure { | |
4727 size_t _n; | |
4728 public: | |
4729 RegionCounter() : _n(0) {} | |
4730 bool doHeapRegion(HeapRegion* r) { | |
677 | 4731 if (r->is_empty()) { |
342 | 4732 assert(!r->isHumongous(), "H regions should not be empty."); |
4733 _n++; | |
4734 } | |
4735 return false; | |
4736 } | |
4737 int res() { return (int) _n; } | |
4738 }; | |
4739 | |
4740 size_t G1CollectedHeap::count_free_regions() { | |
4741 RegionCounter rc; | |
4742 heap_region_iterate(&rc); | |
4743 size_t n = rc.res(); | |
4744 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) | |
4745 n--; | |
4746 return n; | |
4747 } | |
4748 | |
4749 size_t G1CollectedHeap::count_free_regions_list() { | |
4750 size_t n = 0; | |
4751 size_t o = 0; | |
4752 ZF_mon->lock_without_safepoint_check(); | |
4753 HeapRegion* cur = _free_region_list; | |
4754 while (cur != NULL) { | |
4755 cur = cur->next_from_free_list(); | |
4756 n++; | |
4757 } | |
4758 size_t m = unclean_region_list_length(); | |
4759 ZF_mon->unlock(); | |
4760 return n + m; | |
4761 } | |
4762 | |
4763 bool G1CollectedHeap::should_set_young_locked() { | |
4764 assert(heap_lock_held_for_gc(), | |
4765 "the heap lock should already be held by or for this thread"); | |
4766 return (g1_policy()->in_young_gc_mode() && | |
4767 g1_policy()->should_add_next_region_to_young_list()); | |
4768 } | |
4769 | |
4770 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
4771 assert(heap_lock_held_for_gc(), | |
4772 "the heap lock should already be held by or for this thread"); | |
4773 _young_list->push_region(hr); | |
4774 g1_policy()->set_region_short_lived(hr); | |
4775 } | |
4776 | |
4777 class NoYoungRegionsClosure: public HeapRegionClosure { | |
4778 private: | |
4779 bool _success; | |
4780 public: | |
4781 NoYoungRegionsClosure() : _success(true) { } | |
4782 bool doHeapRegion(HeapRegion* r) { | |
4783 if (r->is_young()) { | |
4784 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
4785 r->bottom(), r->end()); | |
4786 _success = false; | |
4787 } | |
4788 return false; | |
4789 } | |
4790 bool success() { return _success; } | |
4791 }; | |
4792 | |
4793 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list, | |
4794 bool check_sample) { | |
4795 bool ret = true; | |
4796 | |
4797 ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample); | |
4798 if (!ignore_scan_only_list) { | |
4799 NoYoungRegionsClosure closure; | |
4800 heap_region_iterate(&closure); | |
4801 ret = ret && closure.success(); | |
4802 } | |
4803 | |
4804 return ret; | |
4805 } | |
4806 | |
4807 void G1CollectedHeap::empty_young_list() { | |
4808 assert(heap_lock_held_for_gc(), | |
4809 "the heap lock should already be held by or for this thread"); | |
4810 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
4811 | |
4812 _young_list->empty_list(); | |
4813 } | |
4814 | |
4815 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
4816 bool no_allocs = true; | |
4817 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
4818 HeapRegion* r = _gc_alloc_regions[ap]; | |
4819 no_allocs = r == NULL || r->saved_mark_at_top(); | |
4820 } | |
4821 return no_allocs; | |
4822 } | |
4823 | |
545 | 4824 void G1CollectedHeap::retire_all_alloc_regions() { |
342 | 4825 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
4826 HeapRegion* r = _gc_alloc_regions[ap]; | |
4827 if (r != NULL) { | |
4828 // Check for aliases. | |
4829 bool has_processed_alias = false; | |
4830 for (int i = 0; i < ap; ++i) { | |
4831 if (_gc_alloc_regions[i] == r) { | |
4832 has_processed_alias = true; | |
4833 break; | |
4834 } | |
4835 } | |
4836 if (!has_processed_alias) { | |
545 | 4837 retire_alloc_region(r, false /* par */); |
342 | 4838 } |
4839 } | |
4840 } | |
4841 } | |
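// ---------------------------------------------------------------------
// Editor's sketch: several allocation purposes can share one region, so
// the retirement loop above checks every earlier slot and retires a
// region only the first time it is seen. The int objects below stand in
// for HeapRegion instances.
#include <cstdio>

int main() {
  int r1 = 0, r2 = 0;                          // stand-in regions
  int* gc_alloc_regions[3] = {&r1, &r2, &r1};  // purposes 0 and 2 alias r1
  for (int ap = 0; ap < 3; ++ap) {
    int* r = gc_alloc_regions[ap];
    if (r == nullptr) continue;
    bool has_processed_alias = false;
    for (int i = 0; i < ap; ++i) {
      if (gc_alloc_regions[i] == r) {          // already retired earlier
        has_processed_alias = true;
        break;
      }
    }
    if (!has_processed_alias) {
      std::printf("retire region for purpose %d\n", ap);  // 0 and 1 only
    }
  }
  return 0;
}
// ---------------------------------------------------------------------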
4842 | |
4843 | |
4844 // Done at the start of full GC. | |
4845 void G1CollectedHeap::tear_down_region_lists() { | |
4846 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4847 while (pop_unclean_region_list_locked() != NULL) ; | |
4848 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, | |
4849 "Postconditions of loop.") | |
4850 while (pop_free_region_list_locked() != NULL) ; | |
4851 assert(_free_region_list == NULL, "Postcondition of loop."); | |
4852 if (_free_region_list_size != 0) { | |
4853 gclog_or_tty->print_cr("Size is "SIZE_FORMAT".", _free_region_list_size); |
838 | 4854 print_on(gclog_or_tty, true /* extended */); |
342 | 4855 } |
4856 assert(_free_region_list_size == 0, "Postconditions of loop."); | |
4857 } | |
4858 | |
4859 | |
4860 class RegionResetter: public HeapRegionClosure { | |
4861 G1CollectedHeap* _g1; | |
4862 int _n; | |
4863 public: | |
4864 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
4865 bool doHeapRegion(HeapRegion* r) { | |
4866 if (r->continuesHumongous()) return false; | |
4867 if (r->top() > r->bottom()) { | |
4868 if (r->top() < r->end()) { | |
4869 Copy::fill_to_words(r->top(), | |
4870 pointer_delta(r->end(), r->top())); | |
4871 } | |
4872 r->set_zero_fill_allocated(); | |
4873 } else { | |
4874 assert(r->is_empty(), "tautology"); | |
677 | 4875 _n++; |
4876 switch (r->zero_fill_state()) { | |
342 | 4877 case HeapRegion::NotZeroFilled: |
4878 case HeapRegion::ZeroFilling: | |
4879 _g1->put_region_on_unclean_list_locked(r); | |
4880 break; | |
4881 case HeapRegion::Allocated: | |
4882 r->set_zero_fill_complete(); | |
4883 // no break; go on to put on free list. | |
4884 case HeapRegion::ZeroFilled: | |
4885 _g1->put_free_region_on_list_locked(r); | |
4886 break; | |
4887 } | |
4888 } | |
4889 return false; | |
4890 } | |
4891 | |
4892 int getFreeRegionCount() {return _n;} | |
4893 }; | |
4894 | |
4895 // Done at the end of full GC. | |
4896 void G1CollectedHeap::rebuild_region_lists() { | |
4897 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4898 // This needs to go at the end of the full GC. | |
4899 RegionResetter rs; | |
4900 heap_region_iterate(&rs); | |
4901 _free_regions = rs.getFreeRegionCount(); | |
4902 // Tell the ZF thread it may have work to do. | |
4903 if (should_zf()) ZF_mon->notify_all(); | |
4904 } | |
4905 | |
4906 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { | |
4907 G1CollectedHeap* _g1; | |
4908 int _n; | |
4909 public: | |
4910 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
4911 bool doHeapRegion(HeapRegion* r) { | |
4912 if (r->continuesHumongous()) return false; | |
4913 if (r->top() > r->bottom()) { | |
4914 // There are assertions in "set_zero_fill_needed()" below that | |
4915 // require top() == bottom(), so this is technically illegal. | |
4916 // We'll skirt the law here, by making that true temporarily. | |
4917 DEBUG_ONLY(HeapWord* save_top = r->top(); | |
4918 r->set_top(r->bottom())); | |
4919 r->set_zero_fill_needed(); | |
4920 DEBUG_ONLY(r->set_top(save_top)); | |
4921 } | |
4922 return false; | |
4923 } | |
4924 }; | |
4925 | |
4926 // Done at the start of full GC. | |
4927 void G1CollectedHeap::set_used_regions_to_need_zero_fill() { | |
4928 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4929 // This needs to go at the end of the full GC. | |
4930 UsedRegionsNeedZeroFillSetter rs; | |
4931 heap_region_iterate(&rs); | |
4932 } | |
4933 | |
4934 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
4935 _refine_cte_cl->set_concurrent(concurrent); | |
4936 } | |
4937 | |
4938 #ifndef PRODUCT | |
4939 | |
4940 class PrintHeapRegionClosure: public HeapRegionClosure { | |
4941 public: | |
4942 bool doHeapRegion(HeapRegion *r) { | |
4943 gclog_or_tty->print("Region: "PTR_FORMAT":", r); | |
4944 if (r != NULL) { | |
4945 if (r->is_on_free_list()) | |
4946 gclog_or_tty->print("Free "); | |
4947 if (r->is_young()) | |
4948 gclog_or_tty->print("Young "); | |
4949 if (r->isHumongous()) | |
4950 gclog_or_tty->print("Is Humongous "); | |
4951 r->print(); | |
4952 } | |
4953 return false; | |
4954 } | |
4955 }; | |
4956 | |
4957 class SortHeapRegionClosure : public HeapRegionClosure { | |
4958 size_t young_regions, free_regions, unclean_regions; |
4959 size_t hum_regions, count; | |
4960 size_t unaccounted, cur_unclean, cur_alloc; | |
4961 size_t total_free; | |
4962 HeapRegion* cur; | |
4963 public: | |
4964 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), | |
4965 free_regions(0), unclean_regions(0), | |
4966 hum_regions(0), | |
4967 count(0), unaccounted(0), | |
4968 cur_alloc(0), total_free(0) | |
4969 {} | |
4970 bool doHeapRegion(HeapRegion *r) { | |
4971 count++; | |
4972 if (r->is_on_free_list()) free_regions++; | |
4973 else if (r->is_on_unclean_list()) unclean_regions++; | |
4974 else if (r->isHumongous()) hum_regions++; | |
4975 else if (r->is_young()) young_regions++; | |
4976 else if (r == cur) cur_alloc++; | |
4977 else unaccounted++; | |
4978 return false; | |
4979 } | |
4980 void print() { | |
4981 total_free = free_regions + unclean_regions; | |
4982 gclog_or_tty->print(SIZE_FORMAT" regions\n", count); |
4983 gclog_or_tty->print(SIZE_FORMAT" free: free_list = "SIZE_FORMAT" unclean = "SIZE_FORMAT"\n", |
4984 total_free, free_regions, unclean_regions); |
4985 gclog_or_tty->print(SIZE_FORMAT" humongous "SIZE_FORMAT" young\n", |
4986 hum_regions, young_regions); |
4987 gclog_or_tty->print(SIZE_FORMAT" cur_alloc\n", cur_alloc); |
4988 gclog_or_tty->print("UHOH unaccounted = "SIZE_FORMAT"\n", unaccounted); |
4989 } | |
4990 }; | |
4991 | |
4992 void G1CollectedHeap::print_region_counts() { | |
4993 SortHeapRegionClosure sc(_cur_alloc_region); | |
4994 PrintHeapRegionClosure cl; | |
4995 heap_region_iterate(&cl); | |
4996 heap_region_iterate(&sc); | |
4997 sc.print(); | |
4998 print_region_accounting_info(); | |
4999 }; | |
5000 | |
5001 bool G1CollectedHeap::regions_accounted_for() { | |
5002 // TODO: regions accounting for young/survivor/tenured | |
5003 return true; | |
5004 } | |
5005 | |
5006 bool G1CollectedHeap::print_region_accounting_info() { | |
5007 gclog_or_tty->print_cr("Free regions: "SIZE_FORMAT" (count: "SIZE_FORMAT" count list "SIZE_FORMAT") (clean: "SIZE_FORMAT" unclean: "SIZE_FORMAT").", |
5008 free_regions(), |
5009 count_free_regions(), count_free_regions_list(), |
5010 _free_region_list_size, _unclean_region_list.sz()); |
5011 gclog_or_tty->print_cr("cur_alloc: %d.", | |
5012 (_cur_alloc_region == NULL ? 0 : 1)); | |
5013 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); | |
5014 | |
5015 // TODO: check regions accounting for young/survivor/tenured | |
5016 return true; | |
5017 } | |
5018 | |
5019 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5020 HeapRegion* hr = heap_region_containing(p); | |
5021 if (hr == NULL) { | |
5022 return is_in_permanent(p); | |
5023 } else { | |
5024 return hr->is_in(p); | |
5025 } | |
5026 } | |
5027 #endif // PRODUCT | |
5028 | |
5029 void G1CollectedHeap::g1_unimplemented() { | |
5030 // Unimplemented(); | |
5031 } |