annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 452:00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
Summary: When we encounter marking stack overflow during precleaning of Reference lists, we were using the overflow list mechanism, which can cause problems on account of mutating the mark word of the header because of conflicts with mutator accesses and updates of that field. Instead we should use the usual mechanism for overflow handling in concurrent phases, namely dirtying of the card on which the overflowed object lies. Since precleaning effectively does a form of discovered list processing, albeit with discovery enabled, we needed to adjust some code to be correct in the face of interleaved processing and discovery.
Reviewed-by: apetrusenko, jcoomes
author    ysr
date      Thu, 20 Nov 2008 12:27:41 -0800
parents   078b8a0d8d7c
children  c96030fff130

rev | line source
342 | 1 /* |
2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_g1CollectedHeap.cpp.incl" | |
27 | |
28 // turn it on so that the contents of the young list (scan-only / | |
29 // to-be-collected) are printed at "strategic" points before / during | |
30 // / after the collection --- this is useful for debugging | |
31 #define SCAN_ONLY_VERBOSE 0 | |
32 // CURRENT STATUS | |
33 // This file is under construction. Search for "FIXME". | |
34 | |
35 // INVARIANTS/NOTES | |
36 // | |
37 // All allocation activity covered by the G1CollectedHeap interface is | |
38 // serialized by acquiring the HeapLock. This happens in | |
39 // mem_allocate_work, which all such allocation functions call. | |
40 // (Note that this does not apply to TLAB allocation, which is not part | |
41 // of this interface: it is done by clients of this interface.) | |
42 | |
43 // Local to this file. | |
44 | |
45 // Finds the first HeapRegion. | |
46 // No longer used, but might be handy someday. | |
47 | |
48 class FindFirstRegionClosure: public HeapRegionClosure { | |
49 HeapRegion* _a_region; | |
50 public: | |
51 FindFirstRegionClosure() : _a_region(NULL) {} | |
52 bool doHeapRegion(HeapRegion* r) { | |
53 _a_region = r; | |
54 return true; | |
55 } | |
56 HeapRegion* result() { return _a_region; } | |
57 }; | |
58 | |
59 | |
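// Closure applied by the concurrent refinement threads to each dirty card
// table entry: refines the card via G1RemSet::concurrentRefineOneCard and
// returns false (so the caller yields) if the suspendible thread set asks
// for a yield while we are running concurrently.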
60 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
61 SuspendibleThreadSet* _sts; | |
62 G1RemSet* _g1rs; | |
63 ConcurrentG1Refine* _cg1r; | |
64 bool _concurrent; | |
65 public: | |
66 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
67 G1RemSet* g1rs, | |
68 ConcurrentG1Refine* cg1r) : | |
69 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
70 {} | |
71 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
72 _g1rs->concurrentRefineOneCard(card_ptr, worker_i); | |
73 if (_concurrent && _sts->should_yield()) { | |
74 // Caller will actually yield. | |
75 return false; | |
76 } | |
77 // Otherwise, we finished successfully; return true. | |
78 return true; | |
79 } | |
80 void set_concurrent(bool b) { _concurrent = b; } | |
81 }; | |
82 | |
83 | |
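// Debugging closure used by check_ct_logs_at_safepoint() below: marks each
// logged card clean and keeps a histogram of the card values it saw.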
84 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
85 int _calls; | |
86 G1CollectedHeap* _g1h; | |
87 CardTableModRefBS* _ctbs; | |
88 int _histo[256]; | |
89 public: | |
90 ClearLoggedCardTableEntryClosure() : | |
91 _calls(0) | |
92 { | |
93 _g1h = G1CollectedHeap::heap(); | |
94 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
95 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
96 } | |
97 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
98 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
99 _calls++; | |
100 unsigned char* ujb = (unsigned char*)card_ptr; | |
101 int ind = (int)(*ujb); | |
102 _histo[ind]++; | |
103 *card_ptr = -1; | |
104 } | |
105 return true; | |
106 } | |
107 int calls() { return _calls; } | |
108 void print_histo() { | |
109 gclog_or_tty->print_cr("Card table value histogram:"); | |
110 for (int i = 0; i < 256; i++) { | |
111 if (_histo[i] != 0) { | |
112 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
113 } | |
114 } | |
115 } | |
116 }; | |
117 | |
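// Companion of the closure above: re-dirties the logged cards that were
// cleared, so the card table ends up as it was before the check.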
118 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
119 int _calls; | |
120 G1CollectedHeap* _g1h; | |
121 CardTableModRefBS* _ctbs; | |
122 public: | |
123 RedirtyLoggedCardTableEntryClosure() : | |
124 _calls(0) | |
125 { | |
126 _g1h = G1CollectedHeap::heap(); | |
127 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
128 } | |
129 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
130 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
131 _calls++; | |
132 *card_ptr = 0; | |
133 } | |
134 return true; | |
135 } | |
136 int calls() { return _calls; } | |
137 }; | |
138 | |
139 YoungList::YoungList(G1CollectedHeap* g1h) | |
140 : _g1h(g1h), _head(NULL), | |
141 _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL), | |
142 _length(0), _scan_only_length(0), | |
143 _last_sampled_rs_lengths(0), | |
144 _survivor_head(NULL), _survivors_tail(NULL), _survivor_length(0) | |
145 { | |
146 guarantee( check_list_empty(false), "just making sure..." ); | |
147 } | |
148 | |
149 void YoungList::push_region(HeapRegion *hr) { | |
150 assert(!hr->is_young(), "should not already be young"); | |
151 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
152 | |
153 hr->set_next_young_region(_head); | |
154 _head = hr; | |
155 | |
156 hr->set_young(); | |
157 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
158 ++_length; | |
159 } | |
160 | |
161 void YoungList::add_survivor_region(HeapRegion* hr) { | |
162 assert(!hr->is_survivor(), "should not already be for survived"); | |
163 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
164 | |
165 hr->set_next_young_region(_survivor_head); | |
166 if (_survivor_head == NULL) { | |
167 _survivors_tail = hr; | |
168 } | |
169 _survivor_head = hr; | |
170 | |
171 hr->set_survivor(); | |
172 ++_survivor_length; | |
173 } | |
174 | |
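// Pops regions off the head of the young list until a region that is not
// scan-only is found and returned; scan-only regions encountered on the way
// are appended to the scan-only list (and re-tagged as plain young).
// Returns NULL once the list is empty.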
175 HeapRegion* YoungList::pop_region() { | |
176 while (_head != NULL) { | |
177 assert( length() > 0, "list should not be empty" ); | |
178 HeapRegion* ret = _head; | |
179 _head = ret->get_next_young_region(); | |
180 ret->set_next_young_region(NULL); | |
181 --_length; | |
182 assert(ret->is_young(), "region should be very young"); | |
183 | |
184 // Replace 'Survivor' region type with 'Young'. So the region will | |
185 // be treated as a young region and will not be 'confused' with | |
186 // newly created survivor regions. | |
187 if (ret->is_survivor()) { | |
188 ret->set_young(); | |
189 } | |
190 | |
191 if (!ret->is_scan_only()) { | |
192 return ret; | |
193 } | |
194 | |
195 // scan-only, we'll add it to the scan-only list | |
196 if (_scan_only_tail == NULL) { | |
197 guarantee( _scan_only_head == NULL, "invariant" ); | |
198 | |
199 _scan_only_head = ret; | |
200 _curr_scan_only = ret; | |
201 } else { | |
202 guarantee( _scan_only_head != NULL, "invariant" ); | |
203 _scan_only_tail->set_next_young_region(ret); | |
204 } | |
205 guarantee( ret->get_next_young_region() == NULL, "invariant" ); | |
206 _scan_only_tail = ret; | |
207 | |
208 // no need to be tagged as scan-only any more | |
209 ret->set_young(); | |
210 | |
211 ++_scan_only_length; | |
212 } | |
213 assert( length() == 0, "list should be empty" ); | |
214 return NULL; | |
215 } | |
216 | |
217 void YoungList::empty_list(HeapRegion* list) { | |
218 while (list != NULL) { | |
219 HeapRegion* next = list->get_next_young_region(); | |
220 list->set_next_young_region(NULL); | |
221 list->uninstall_surv_rate_group(); | |
222 list->set_not_young(); | |
223 list = next; | |
224 } | |
225 } | |
226 | |
227 void YoungList::empty_list() { | |
228 assert(check_list_well_formed(), "young list should be well formed"); | |
229 | |
230 empty_list(_head); | |
231 _head = NULL; | |
232 _length = 0; | |
233 | |
234 empty_list(_scan_only_head); | |
235 _scan_only_head = NULL; | |
236 _scan_only_tail = NULL; | |
237 _scan_only_length = 0; | |
238 _curr_scan_only = NULL; | |
239 | |
240 empty_list(_survivor_head); | |
241 _survivor_head = NULL; | |
242 _survivors_tail = NULL; | |
243 _survivor_length = 0; | |
244 | |
245 _last_sampled_rs_lengths = 0; | |
246 | |
247 assert(check_list_empty(false), "just making sure..."); | |
248 } | |
249 | |
250 bool YoungList::check_list_well_formed() { | |
251 bool ret = true; | |
252 | |
253 size_t length = 0; | |
254 HeapRegion* curr = _head; | |
255 HeapRegion* last = NULL; | |
256 while (curr != NULL) { | |
257 if (!curr->is_young() || curr->is_scan_only()) { | |
258 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " | |
259 "incorrectly tagged (%d, %d)", | |
260 curr->bottom(), curr->end(), | |
261 curr->is_young(), curr->is_scan_only()); | |
262 ret = false; | |
263 } | |
264 ++length; | |
265 last = curr; | |
266 curr = curr->get_next_young_region(); | |
267 } | |
268 ret = ret && (length == _length); | |
269 | |
270 if (!ret) { | |
271 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
272 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
273 length, _length); | |
274 } | |
275 | |
276 bool scan_only_ret = true; | |
277 length = 0; | |
278 curr = _scan_only_head; | |
279 last = NULL; | |
280 while (curr != NULL) { | |
281 if (!curr->is_young() || curr->is_scan_only()) { | |
282 gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" " | |
283 "incorrectly tagged (%d, %d)", | |
284 curr->bottom(), curr->end(), | |
285 curr->is_young(), curr->is_scan_only()); | |
286 scan_only_ret = false; | |
287 } | |
288 ++length; | |
289 last = curr; | |
290 curr = curr->get_next_young_region(); | |
291 } | |
292 scan_only_ret = scan_only_ret && (length == _scan_only_length); | |
293 | |
294 if ( (last != _scan_only_tail) || | |
295 (_scan_only_head == NULL && _scan_only_tail != NULL) || | |
296 (_scan_only_head != NULL && _scan_only_tail == NULL) ) { | |
297 gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly"); | |
298 scan_only_ret = false; | |
299 } | |
300 | |
301 if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) { | |
302 gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly"); | |
303 scan_only_ret = false; | |
304 } | |
305 | |
306 if (!scan_only_ret) { | |
307 gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!"); | |
308 gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d", | |
309 length, _scan_only_length); | |
310 } | |
311 | |
312 return ret && scan_only_ret; | |
313 } | |
314 | |
315 bool YoungList::check_list_empty(bool ignore_scan_only_list, | |
316 bool check_sample) { | |
317 bool ret = true; | |
318 | |
319 if (_length != 0) { | |
320 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
321 _length); | |
322 ret = false; | |
323 } | |
324 if (check_sample && _last_sampled_rs_lengths != 0) { | |
325 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
326 ret = false; | |
327 } | |
328 if (_head != NULL) { | |
329 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
330 ret = false; | |
331 } | |
332 if (!ret) { | |
333 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
334 } | |
335 | |
336 if (ignore_scan_only_list) | |
337 return ret; | |
338 | |
339 bool scan_only_ret = true; | |
340 if (_scan_only_length != 0) { | |
341 gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d", | |
342 _scan_only_length); | |
343 scan_only_ret = false; | |
344 } | |
345 if (_scan_only_head != NULL) { | |
346 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head"); | |
347 scan_only_ret = false; | |
348 } | |
349 if (_scan_only_tail != NULL) { | |
350 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail"); | |
351 scan_only_ret = false; | |
352 } | |
353 if (!scan_only_ret) { | |
354 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty"); | |
355 } | |
356 | |
357 return ret && scan_only_ret; | |
358 } | |
359 | |
360 void | |
361 YoungList::rs_length_sampling_init() { | |
362 _sampled_rs_lengths = 0; | |
363 _curr = _head; | |
364 } | |
365 | |
366 bool | |
367 YoungList::rs_length_sampling_more() { | |
368 return _curr != NULL; | |
369 } | |
370 | |
371 void | |
372 YoungList::rs_length_sampling_next() { | |
373 assert( _curr != NULL, "invariant" ); | |
374 _sampled_rs_lengths += _curr->rem_set()->occupied(); | |
375 _curr = _curr->get_next_young_region(); | |
376 if (_curr == NULL) { | |
377 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
378 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
379 } | |
380 } | |
381 | |
382 void | |
383 YoungList::reset_auxilary_lists() { | |
384 // We could have just "moved" the scan-only list to the young list. | |
385 // However, the scan-only list is ordered according to the region | |
386 // age in descending order, so, by moving one entry at a time, we | |
387 // ensure that it is recreated in ascending order. | |
388 | |
389 guarantee( is_empty(), "young list should be empty" ); | |
390 assert(check_list_well_formed(), "young list should be well formed"); | |
391 | |
392 // Add survivor regions to SurvRateGroup. | |
393 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
394 for (HeapRegion* curr = _survivor_head; | |
395 curr != NULL; | |
396 curr = curr->get_next_young_region()) { | |
397 _g1h->g1_policy()->set_region_survivors(curr); | |
398 } | |
399 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
400 | |
401 if (_survivor_head != NULL) { | |
402 _head = _survivor_head; | |
403 _length = _survivor_length + _scan_only_length; | |
404 _survivors_tail->set_next_young_region(_scan_only_head); | |
405 } else { | |
406 _head = _scan_only_head; | |
407 _length = _scan_only_length; | |
408 } | |
409 | |
410 for (HeapRegion* curr = _scan_only_head; | |
411 curr != NULL; | |
412 curr = curr->get_next_young_region()) { | |
413 curr->recalculate_age_in_surv_rate_group(); | |
414 } | |
415 _scan_only_head = NULL; | |
416 _scan_only_tail = NULL; | |
417 _scan_only_length = 0; | |
418 _curr_scan_only = NULL; | |
419 | |
420 _survivor_head = NULL; | |
421 _survivors_tail = NULL; | |
422 _survivor_length = 0; | |
423 _g1h->g1_policy()->finished_recalculating_age_indexes(); | |
424 | |
425 assert(check_list_well_formed(), "young list should be well formed"); | |
426 } | |
427 | |
428 void YoungList::print() { | |
429 HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head}; | |
430 const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"}; | |
431 | |
432 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
433 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
434 HeapRegion *curr = lists[list]; | |
435 if (curr == NULL) | |
436 gclog_or_tty->print_cr(" empty"); | |
437 while (curr != NULL) { | |
438 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
439 "age: %4d, y: %d, s-o: %d, surv: %d", | |
440 curr->bottom(), curr->end(), | |
441 curr->top(), | |
442 curr->prev_top_at_mark_start(), | |
443 curr->next_top_at_mark_start(), | |
444 curr->top_at_conc_mark_count(), | |
445 curr->age_in_surv_rate_group_cond(), | |
446 curr->is_young(), | |
447 curr->is_scan_only(), | |
448 curr->is_survivor()); | |
449 curr = curr->get_next_young_region(); | |
450 } | |
451 } | |
452 | |
453 gclog_or_tty->print_cr(""); | |
454 } | |
455 | |
456 void G1CollectedHeap::stop_conc_gc_threads() { | |
457 _cg1r->cg1rThread()->stop(); | |
458 _czft->stop(); | |
459 _cmThread->stop(); | |
460 } | |
461 | |
462 | |
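// Debugging check, run at a safepoint: clears every card recorded in the
// dirty card queues, verifies that the card table is then completely clean,
// and finally re-dirties the logged cards and restores the normal
// refinement closure.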
463 void G1CollectedHeap::check_ct_logs_at_safepoint() { | |
464 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
465 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
466 | |
467 // Count the dirty cards at the start. | |
468 CountNonCleanMemRegionClosure count1(this); | |
469 ct_bs->mod_card_iterate(&count1); | |
470 int orig_count = count1.n(); | |
471 | |
472 // First clear the logged cards. | |
473 ClearLoggedCardTableEntryClosure clear; | |
474 dcqs.set_closure(&clear); | |
475 dcqs.apply_closure_to_all_completed_buffers(); | |
476 dcqs.iterate_closure_all_threads(false); | |
477 clear.print_histo(); | |
478 | |
479 // Now ensure that there are no dirty cards.
480 CountNonCleanMemRegionClosure count2(this); | |
481 ct_bs->mod_card_iterate(&count2); | |
482 if (count2.n() != 0) { | |
483 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
484 count2.n(), orig_count); | |
485 } | |
486 guarantee(count2.n() == 0, "Card table should be clean."); | |
487 | |
488 RedirtyLoggedCardTableEntryClosure redirty; | |
489 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
490 dcqs.apply_closure_to_all_completed_buffers(); | |
491 dcqs.iterate_closure_all_threads(false); | |
492 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
493 clear.calls(), orig_count); | |
494 guarantee(redirty.calls() == clear.calls(), | |
495 "Or else mechanism is broken."); | |
496 | |
497 CountNonCleanMemRegionClosure count3(this); | |
498 ct_bs->mod_card_iterate(&count3); | |
499 if (count3.n() != orig_count) { | |
500 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
501 orig_count, count3.n()); | |
502 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
503 } | |
504 | |
505 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
506 } | |
507 | |
508 // Private class members. | |
509 | |
510 G1CollectedHeap* G1CollectedHeap::_g1h; | |
511 | |
512 // Private methods. | |
513 | |
514 // Finds a HeapRegion that can be used to allocate a given size of block. | |
515 | |
516 | |
517 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, | |
518 bool do_expand, | |
519 bool zero_filled) { | |
520 ConcurrentZFThread::note_region_alloc(); | |
521 HeapRegion* res = alloc_free_region_from_lists(zero_filled); | |
522 if (res == NULL && do_expand) { | |
523 expand(word_size * HeapWordSize); | |
524 res = alloc_free_region_from_lists(zero_filled); | |
525 assert(res == NULL || | |
526 (!res->isHumongous() && | |
527 (!zero_filled || | |
528 res->zero_fill_state() == HeapRegion::Allocated)), | |
529 "Alloc Regions must be zero filled (and non-H)"); | |
530 } | |
531 if (res != NULL && res->is_empty()) _free_regions--; | |
532 assert(res == NULL || | |
533 (!res->isHumongous() && | |
534 (!zero_filled || | |
535 res->zero_fill_state() == HeapRegion::Allocated)), | |
536 "Non-young alloc Regions must be zero filled (and non-H)"); | |
537 | |
538 if (G1TraceRegions) { | |
539 if (res != NULL) { | |
540 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
541 "top "PTR_FORMAT, | |
542 res->hrs_index(), res->bottom(), res->end(), res->top()); | |
543 } | |
544 } | |
545 | |
546 return res; | |
547 } | |
548 | |
549 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, | |
550 size_t word_size, | |
551 bool zero_filled) { | |
552 HeapRegion* alloc_region = NULL; | |
553 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
554 alloc_region = newAllocRegion_work(word_size, true, zero_filled); | |
555 if (purpose == GCAllocForSurvived && alloc_region != NULL) { | |
556 _young_list->add_survivor_region(alloc_region); | |
557 } | |
558 ++_gc_alloc_region_counts[purpose]; | |
559 } else { | |
560 g1_policy()->note_alloc_region_limit_reached(purpose); | |
561 } | |
562 return alloc_region; | |
563 } | |
564 | |
565 // If could fit into free regions w/o expansion, try. | |
566 // Otherwise, if can expand, do so. | |
567 // Otherwise, if using ex regions might help, try with ex given back. | |
568 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { | |
569 assert(regions_accounted_for(), "Region leakage!"); | |
570 | |
571 // We can't allocate H regions while cleanupComplete is running, since | |
572 // some of the regions we find to be empty might not yet be added to the | |
573 // unclean list. (If we're already at a safepoint, this call is | |
574 // unnecessary, not to mention wrong.) | |
575 if (!SafepointSynchronize::is_at_safepoint()) | |
576 wait_for_cleanup_complete(); | |
577 | |
578 size_t num_regions = | |
579 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; | |
580 | |
581 // Special case if < one region??? | |
582 | |
583 // Remember the ft size. | |
584 size_t x_size = expansion_regions(); | |
585 | |
586 HeapWord* res = NULL; | |
587 bool eliminated_allocated_from_lists = false; | |
588 | |
589 // Can the allocation potentially fit in the free regions? | |
590 if (free_regions() >= num_regions) { | |
591 res = _hrs->obj_allocate(word_size); | |
592 } | |
593 if (res == NULL) { | |
594 // Try expansion. | |
595 size_t fs = _hrs->free_suffix(); | |
596 if (fs + x_size >= num_regions) { | |
597 expand((num_regions - fs) * HeapRegion::GrainBytes); | |
598 res = _hrs->obj_allocate(word_size); | |
599 assert(res != NULL, "This should have worked."); | |
600 } else { | |
601 // Expansion won't help. Are there enough free regions if we get rid | |
602 // of reservations? | |
603 size_t avail = free_regions(); | |
604 if (avail >= num_regions) { | |
605 res = _hrs->obj_allocate(word_size); | |
606 if (res != NULL) { | |
607 remove_allocated_regions_from_lists(); | |
608 eliminated_allocated_from_lists = true; | |
609 } | |
610 } | |
611 } | |
612 } | |
613 if (res != NULL) { | |
614 // Increment by the number of regions allocated. | |
615 // FIXME: Assumes regions all of size GrainBytes. | |
616 #ifndef PRODUCT | |
617 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * | |
618 HeapRegion::GrainWords)); | |
619 #endif | |
620 if (!eliminated_allocated_from_lists) | |
621 remove_allocated_regions_from_lists(); | |
622 _summary_bytes_used += word_size * HeapWordSize; | |
623 _free_regions -= num_regions; | |
624 _num_humongous_regions += (int) num_regions; | |
625 } | |
626 assert(regions_accounted_for(), "Region Leakage"); | |
627 return res; | |
628 } | |
629 | |
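// Slow-path allocation: called either at a safepoint or with the Heap_lock
// held. Handles humongous allocations and the case where the current
// allocation region is exhausted; may trigger a collection pause if
// permitted. On a successful allocation outside a safepoint, the Heap_lock
// is released before returning.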
630 HeapWord* | |
631 G1CollectedHeap::attempt_allocation_slow(size_t word_size, | |
632 bool permit_collection_pause) { | |
633 HeapWord* res = NULL; | |
634 HeapRegion* allocated_young_region = NULL; | |
635 | |
636 assert( SafepointSynchronize::is_at_safepoint() || | |
637 Heap_lock->owned_by_self(), "pre condition of the call" ); | |
638 | |
639 if (isHumongous(word_size)) { | |
640 // Allocation of a humongous object can, in a sense, complete a | |
641 // partial region, if the previous alloc was also humongous, and | |
642 // caused the test below to succeed. | |
643 if (permit_collection_pause) | |
644 do_collection_pause_if_appropriate(word_size); | |
645 res = humongousObjAllocate(word_size); | |
646 assert(_cur_alloc_region == NULL | |
647 || !_cur_alloc_region->isHumongous(), | |
648 "Prevent a regression of this bug."); | |
649 | |
650 } else { | |
354 | 651 // We may have concurrent cleanup working at the time. Wait for it
652 // to complete. In the future we would probably want to make the
653 // concurrent cleanup truly concurrent by decoupling it from the
654 // allocation.
655 if (!SafepointSynchronize::is_at_safepoint())
656 wait_for_cleanup_complete();
342 | 657 // If we do a collection pause, this will be reset to a non-NULL |
658 // value. If we don't, nulling here ensures that we allocate a new | |
659 // region below. | |
660 if (_cur_alloc_region != NULL) { | |
661 // We're finished with the _cur_alloc_region. | |
662 _summary_bytes_used += _cur_alloc_region->used(); | |
663 _cur_alloc_region = NULL; | |
664 } | |
665 assert(_cur_alloc_region == NULL, "Invariant."); | |
666 // Completion of a heap region is perhaps a good point at which to do | |
667 // a collection pause. | |
668 if (permit_collection_pause) | |
669 do_collection_pause_if_appropriate(word_size); | |
670 // Make sure we have an allocation region available. | |
671 if (_cur_alloc_region == NULL) { | |
672 if (!SafepointSynchronize::is_at_safepoint()) | |
673 wait_for_cleanup_complete(); | |
674 bool next_is_young = should_set_young_locked(); | |
675 // If the next region is not young, make sure it's zero-filled. | |
676 _cur_alloc_region = newAllocRegion(word_size, !next_is_young); | |
677 if (_cur_alloc_region != NULL) { | |
678 _summary_bytes_used -= _cur_alloc_region->used(); | |
679 if (next_is_young) { | |
680 set_region_short_lived_locked(_cur_alloc_region); | |
681 allocated_young_region = _cur_alloc_region; | |
682 } | |
683 } | |
684 } | |
685 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), | |
686 "Prevent a regression of this bug."); | |
687 | |
688 // Now retry the allocation. | |
689 if (_cur_alloc_region != NULL) { | |
690 res = _cur_alloc_region->allocate(word_size); | |
691 } | |
692 } | |
693 | |
694 // NOTE: fails frequently in PRT | |
695 assert(regions_accounted_for(), "Region leakage!"); | |
696 | |
697 if (res != NULL) { | |
698 if (!SafepointSynchronize::is_at_safepoint()) { | |
699 assert( permit_collection_pause, "invariant" ); | |
700 assert( Heap_lock->owned_by_self(), "invariant" ); | |
701 Heap_lock->unlock(); | |
702 } | |
703 | |
704 if (allocated_young_region != NULL) { | |
705 HeapRegion* hr = allocated_young_region; | |
706 HeapWord* bottom = hr->bottom(); | |
707 HeapWord* end = hr->end(); | |
708 MemRegion mr(bottom, end); | |
709 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); | |
710 } | |
711 } | |
712 | |
713 assert( SafepointSynchronize::is_at_safepoint() || | |
714 (res == NULL && Heap_lock->owned_by_self()) || | |
715 (res != NULL && !Heap_lock->owned_by_self()), | |
716 "post condition of the call" ); | |
717 | |
718 return res; | |
719 } | |
720 | |
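// Entry point for ordinary object allocation from the VM: loops, retrying
// the fast path under the Heap_lock and, if that fails, handing the request
// to the VM thread as a VM_G1CollectForAllocation operation. Warns if it
// keeps retrying.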
721 HeapWord* | |
722 G1CollectedHeap::mem_allocate(size_t word_size, | |
723 bool is_noref, | |
724 bool is_tlab, | |
725 bool* gc_overhead_limit_was_exceeded) { | |
726 debug_only(check_for_valid_allocation_state()); | |
727 assert(no_gc_in_progress(), "Allocation during gc not allowed"); | |
728 HeapWord* result = NULL; | |
729 | |
730 // Loop until the allocation is satisfied,
731 // or unsatisfied after GC.
732 for (int try_count = 1; /* return or throw */; try_count += 1) { | |
733 int gc_count_before; | |
734 { | |
735 Heap_lock->lock(); | |
736 result = attempt_allocation(word_size); | |
737 if (result != NULL) { | |
738 // attempt_allocation should have unlocked the heap lock | |
739 assert(is_in(result), "result not in heap"); | |
740 return result; | |
741 } | |
742 // Read the gc count while the heap lock is held. | |
743 gc_count_before = SharedHeap::heap()->total_collections(); | |
744 Heap_lock->unlock(); | |
745 } | |
746 | |
747 // Create the garbage collection operation... | |
748 VM_G1CollectForAllocation op(word_size, | |
749 gc_count_before); | |
750 | |
751 // ...and get the VM thread to execute it. | |
752 VMThread::execute(&op); | |
753 if (op.prologue_succeeded()) { | |
754 result = op.result(); | |
755 assert(result == NULL || is_in(result), "result not in heap"); | |
756 return result; | |
757 } | |
758 | |
759 // Give a warning if we seem to be looping forever. | |
760 if ((QueuedAllocationWarningCount > 0) && | |
761 (try_count % QueuedAllocationWarningCount == 0)) { | |
762 warning("G1CollectedHeap::mem_allocate_work retries %d times", | |
763 try_count); | |
764 } | |
765 } | |
766 } | |
767 | |
768 void G1CollectedHeap::abandon_cur_alloc_region() { | |
769 if (_cur_alloc_region != NULL) { | |
770 // We're finished with the _cur_alloc_region. | |
771 if (_cur_alloc_region->is_empty()) { | |
772 _free_regions++; | |
773 free_region(_cur_alloc_region); | |
774 } else { | |
775 _summary_bytes_used += _cur_alloc_region->used(); | |
776 } | |
777 _cur_alloc_region = NULL; | |
778 } | |
779 } | |
780 | |
781 class PostMCRemSetClearClosure: public HeapRegionClosure { | |
782 ModRefBarrierSet* _mr_bs; | |
783 public: | |
784 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
785 bool doHeapRegion(HeapRegion* r) { | |
786 r->reset_gc_time_stamp(); | |
787 if (r->continuesHumongous()) | |
788 return false; | |
789 HeapRegionRemSet* hrrs = r->rem_set(); | |
790 if (hrrs != NULL) hrrs->clear(); | |
791 // You might think here that we could clear just the cards | |
792 // corresponding to the used region. But no: if we leave a dirty card | |
793 // in a region we might allocate into, then it would prevent that card | |
794 // from being enqueued, and cause it to be missed. | |
795 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
796 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
797 return false; | |
798 } | |
799 }; | |
800 | |
801 | |
802 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
803 ModRefBarrierSet* _mr_bs; | |
804 public: | |
805 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
806 bool doHeapRegion(HeapRegion* r) { | |
807 if (r->continuesHumongous()) return false; | |
808 if (r->used_region().word_size() != 0) { | |
809 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
810 } | |
811 return false; | |
812 } | |
813 }; | |
814 | |
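// Performs a stop-the-world collection ("full" here distinguishes an
// explicit System.gc() request): aborts concurrent marking, runs a
// mark-sweep-compact via G1MarkSweep, rebuilds the region lists and
// remembered-set structures, and resizes the heap if necessary.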
815 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs, | |
816 size_t word_size) { | |
817 ResourceMark rm; | |
818 | |
819 if (full && DisableExplicitGC) { | |
820 gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n"); | |
821 return; | |
822 } | |
823 | |
824 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
825 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
826 | |
827 if (GC_locker::is_active()) { | |
828 return; // GC is disabled (e.g. JNI GetXXXCritical operation) | |
829 } | |
830 | |
831 { | |
832 IsGCActiveMark x; | |
833 | |
834 // Timing | |
835 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
836 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
837 TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); | |
838 | |
839 double start = os::elapsedTime(); | |
840 GCOverheadReporter::recordSTWStart(start); | |
841 g1_policy()->record_full_collection_start(); | |
842 | |
843 gc_prologue(true); | |
844 increment_total_collections(); | |
845 | |
846 size_t g1h_prev_used = used(); | |
847 assert(used() == recalculate_used(), "Should be equal"); | |
848 | |
849 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
850 HandleMark hm; // Discard invalid handles created during verification | |
851 prepare_for_verify(); | |
852 gclog_or_tty->print(" VerifyBeforeGC:"); | |
853 Universe::verify(true); | |
854 } | |
855 assert(regions_accounted_for(), "Region leakage!"); | |
856 | |
857 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
858 | |
859 // We want to discover references, but not process them yet. | |
860 // This mode is disabled in | |
861 // instanceRefKlass::process_discovered_references if the | |
862 // generation does some collection work, or | |
863 // instanceRefKlass::enqueue_discovered_references if the | |
864 // generation returns without doing any work. | |
865 ref_processor()->disable_discovery(); | |
866 ref_processor()->abandon_partial_discovery(); | |
867 ref_processor()->verify_no_references_recorded(); | |
868 | |
869 // Abandon current iterations of concurrent marking and concurrent | |
870 // refinement, if any are in progress. | |
871 concurrent_mark()->abort(); | |
872 | |
873 // Make sure we'll choose a new allocation region afterwards. | |
874 abandon_cur_alloc_region(); | |
875 assert(_cur_alloc_region == NULL, "Invariant."); | |
876 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); | |
877 tear_down_region_lists(); | |
878 set_used_regions_to_need_zero_fill(); | |
879 if (g1_policy()->in_young_gc_mode()) { | |
880 empty_young_list(); | |
881 g1_policy()->set_full_young_gcs(true); | |
882 } | |
883 | |
884 // Temporarily make reference _discovery_ single threaded (non-MT). | |
885 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); | |
886 | |
887 // Temporarily make refs discovery atomic | |
888 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
889 | |
890 // Temporarily clear _is_alive_non_header | |
891 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
892 | |
893 ref_processor()->enable_discovery(); | |
894 | |
895 // Do collection work | |
896 { | |
897 HandleMark hm; // Discard invalid handles created during gc | |
898 G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); | |
899 } | |
900 // Because freeing humongous regions may have added some unclean | |
901 // regions, it is necessary to tear down again before rebuilding. | |
902 tear_down_region_lists(); | |
903 rebuild_region_lists(); | |
904 | |
905 _summary_bytes_used = recalculate_used(); | |
906 | |
907 ref_processor()->enqueue_discovered_references(); | |
908 | |
909 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
910 | |
911 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { | |
912 HandleMark hm; // Discard invalid handles created during verification | |
913 gclog_or_tty->print(" VerifyAfterGC:"); | |
914 Universe::verify(false); | |
915 } | |
916 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
917 | |
918 reset_gc_time_stamp(); | |
919 // Since everything potentially moved, we will clear all remembered
920 // sets, and clear all cards. Later we will also dirty the cards in the used
921 // portion of the heap after the resizing (which could be a shrinking).
922 // We will also reset the GC time stamps of the regions. | |
923 PostMCRemSetClearClosure rs_clear(mr_bs()); | |
924 heap_region_iterate(&rs_clear); | |
925 | |
926 // Resize the heap if necessary. | |
927 resize_if_necessary_after_full_collection(full ? 0 : word_size); | |
928 | |
929 // Since everything potentially moved, we will clear all remembered | |
930 // sets, but also dirty all cards corresponding to used regions. | |
931 PostMCRemSetInvalidateClosure rs_invalidate(mr_bs()); | |
932 heap_region_iterate(&rs_invalidate); | |
933 if (_cg1r->use_cache()) { | |
934 _cg1r->clear_and_record_card_counts(); | |
935 _cg1r->clear_hot_cache(); | |
936 } | |
937 | |
938 if (PrintGC) { | |
939 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
940 } | |
941 | |
942 if (true) { // FIXME | |
943 // Ask the permanent generation to adjust size for full collections | |
944 perm()->compute_new_size(); | |
945 } | |
946 | |
947 double end = os::elapsedTime(); | |
948 GCOverheadReporter::recordSTWEnd(end); | |
949 g1_policy()->record_full_collection_end(); | |
950 | |
951 gc_epilogue(true); | |
952 | |
953 // Abandon concurrent refinement. This must happen last: in the | |
954 // dirty-card logging system, some cards may be dirtied by weak-ref
955 // processing, and may be enqueued. But the whole card table is | |
956 // dirtied, so this should abandon those logs, and set "do_traversal" | |
957 // to true. | |
958 concurrent_g1_refine()->set_pya_restart(); | |
959 | |
960 assert(regions_accounted_for(), "Region leakage!"); | |
961 } | |
962 | |
963 if (g1_policy()->in_young_gc_mode()) { | |
964 _young_list->reset_sampled_info(); | |
965 assert( check_young_list_empty(false, false), | |
966 "young list should be empty at this point"); | |
967 } | |
968 } | |
969 | |
970 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
971 do_collection(true, clear_all_soft_refs, 0); | |
972 } | |
973 | |
974 // This code is mostly copied from TenuredGeneration. | |
975 void | |
976 G1CollectedHeap:: | |
977 resize_if_necessary_after_full_collection(size_t word_size) { | |
978 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
979 | |
980 // Include the current allocation, if any, and bytes that will be | |
981 // pre-allocated to support collections, as "used". | |
982 const size_t used_after_gc = used(); | |
983 const size_t capacity_after_gc = capacity(); | |
984 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
985 | |
986 // We don't have floating point command-line arguments | |
987 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100; | |
988 const double maximum_used_percentage = 1.0 - minimum_free_percentage; | |
989 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; | |
990 const double minimum_used_percentage = 1.0 - maximum_free_percentage; | |
991 | |
992 size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage); | |
993 size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage); | |
994 | |
995 // Don't shrink less than the initial size. | |
996 minimum_desired_capacity = | |
997 MAX2(minimum_desired_capacity, | |
998 collector_policy()->initial_heap_byte_size()); | |
999 maximum_desired_capacity = | |
1000 MAX2(maximum_desired_capacity, | |
1001 collector_policy()->initial_heap_byte_size()); | |
1002 | |
1003 // We are failing here because minimum_desired_capacity is | |
1004 assert(used_after_gc <= minimum_desired_capacity, "sanity check"); | |
1005 assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check"); | |
1006 | |
1007 if (PrintGC && Verbose) { | |
1008 const double free_percentage = ((double)free_after_gc) / capacity(); | |
1009 gclog_or_tty->print_cr("Computing new size after full GC "); | |
1010 gclog_or_tty->print_cr(" " | |
1011 " minimum_free_percentage: %6.2f", | |
1012 minimum_free_percentage); | |
1013 gclog_or_tty->print_cr(" " | |
1014 " maximum_free_percentage: %6.2f", | |
1015 maximum_free_percentage); | |
1016 gclog_or_tty->print_cr(" " | |
1017 " capacity: %6.1fK" | |
1018 " minimum_desired_capacity: %6.1fK" | |
1019 " maximum_desired_capacity: %6.1fK", | |
1020 capacity() / (double) K, | |
1021 minimum_desired_capacity / (double) K, | |
1022 maximum_desired_capacity / (double) K); | |
1023 gclog_or_tty->print_cr(" " | |
1024 " free_after_gc : %6.1fK" | |
1025 " used_after_gc : %6.1fK", | |
1026 free_after_gc / (double) K, | |
1027 used_after_gc / (double) K); | |
1028 gclog_or_tty->print_cr(" " | |
1029 " free_percentage: %6.2f", | |
1030 free_percentage); | |
1031 } | |
1032 if (capacity() < minimum_desired_capacity) { | |
1033 // Don't expand unless it's significant | |
1034 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
1035 expand(expand_bytes); | |
1036 if (PrintGC && Verbose) { | |
1037 gclog_or_tty->print_cr(" expanding:" | |
1038 " minimum_desired_capacity: %6.1fK" | |
1039 " expand_bytes: %6.1fK", | |
1040 minimum_desired_capacity / (double) K, | |
1041 expand_bytes / (double) K); | |
1042 } | |
1043 | |
1044 // No expansion, now see if we want to shrink | |
1045 } else if (capacity() > maximum_desired_capacity) { | |
1046 // Capacity too large, compute shrinking size | |
1047 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1048 shrink(shrink_bytes); | |
1049 if (PrintGC && Verbose) { | |
1050 gclog_or_tty->print_cr(" " | |
1051 " shrinking:" | |
1052 " initSize: %.1fK" | |
1053 " maximum_desired_capacity: %.1fK", | |
1054 collector_policy()->initial_heap_byte_size() / (double) K, | |
1055 maximum_desired_capacity / (double) K); | |
1056 gclog_or_tty->print_cr(" " | |
1057 " shrink_bytes: %.1fK", | |
1058 shrink_bytes / (double) K); | |
1059 } | |
1060 } | |
1061 } | |
1062 | |
1063 | |
1064 HeapWord* | |
1065 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { | |
1066 HeapWord* result = NULL; | |
1067 | |
1068 // In a G1 heap, we're supposed to keep allocation from failing by | |
1069 // incremental pauses. Therefore, at least for now, we'll favor | |
1070 // expansion over collection. (This might change in the future if we can | |
1071 // do something smarter than full collection to satisfy a failed alloc.) | |
1072 | |
1073 result = expand_and_allocate(word_size); | |
1074 if (result != NULL) { | |
1075 assert(is_in(result), "result not in heap"); | |
1076 return result; | |
1077 } | |
1078 | |
1079 // OK, I guess we have to try collection. | |
1080 | |
1081 do_collection(false, false, word_size); | |
1082 | |
1083 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1084 | |
1085 if (result != NULL) { | |
1086 assert(is_in(result), "result not in heap"); | |
1087 return result; | |
1088 } | |
1089 | |
1090 // Try collecting soft references. | |
1091 do_collection(false, true, word_size); | |
1092 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1093 if (result != NULL) { | |
1094 assert(is_in(result), "result not in heap"); | |
1095 return result; | |
1096 } | |
1097 | |
1098 // What else? We might try synchronous finalization later. If the total | |
1099 // space available is large enough for the allocation, then a more | |
1100 // complete compaction phase than we've tried so far might be | |
1101 // appropriate. | |
1102 return NULL; | |
1103 } | |
1104 | |
1105 // Attempt to expand the heap sufficiently
1106 // to support an allocation of the given "word_size". If
1107 // successful, perform the allocation and return the address of the
1108 // allocated block, or else "NULL".
1109 | |
1110 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
1111 size_t expand_bytes = word_size * HeapWordSize; | |
1112 if (expand_bytes < MinHeapDeltaBytes) { | |
1113 expand_bytes = MinHeapDeltaBytes; | |
1114 } | |
1115 expand(expand_bytes); | |
1116 assert(regions_accounted_for(), "Region leakage!"); | |
1117 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); | |
1118 return result; | |
1119 } | |
1120 | |
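// Frees "hr" if everything in it is garbage (and it is neither young nor
// popular); returns the number of bytes the region had been using so the
// caller can adjust the heap's accounting.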
1121 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { | |
1122 size_t pre_used = 0; | |
1123 size_t cleared_h_regions = 0; | |
1124 size_t freed_regions = 0; | |
1125 UncleanRegionList local_list; | |
1126 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, | |
1127 freed_regions, &local_list); | |
1128 | |
1129 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
1130 &local_list); | |
1131 return pre_used; | |
1132 } | |
1133 | |
1134 void | |
1135 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, | |
1136 size_t& pre_used, | |
1137 size_t& cleared_h, | |
1138 size_t& freed_regions, | |
1139 UncleanRegionList* list, | |
1140 bool par) { | |
1141 assert(!hr->continuesHumongous(), "should have filtered these out"); | |
1142 size_t res = 0; | |
1143 if (!hr->popular() && hr->used() > 0 && hr->garbage_bytes() == hr->used()) { | |
1144 if (!hr->is_young()) { | |
1145 if (G1PolicyVerbose > 0) | |
1146 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" | |
1147 " during cleanup", hr, hr->used()); | |
1148 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); | |
1149 } | |
1150 } | |
1151 } | |
1152 | |
1153 // FIXME: both this and shrink could probably be more efficient by | |
1154 // doing one "VirtualSpace::expand_by" call rather than several. | |
1155 void G1CollectedHeap::expand(size_t expand_bytes) { | |
1156 size_t old_mem_size = _g1_storage.committed_size(); | |
1157 // We expand by a minimum of 1K. | |
1158 expand_bytes = MAX2(expand_bytes, (size_t)K); | |
1159 size_t aligned_expand_bytes = | |
1160 ReservedSpace::page_align_size_up(expand_bytes); | |
1161 aligned_expand_bytes = align_size_up(aligned_expand_bytes, | |
1162 HeapRegion::GrainBytes); | |
1163 expand_bytes = aligned_expand_bytes; | |
1164 while (expand_bytes > 0) { | |
1165 HeapWord* base = (HeapWord*)_g1_storage.high(); | |
1166 // Commit more storage. | |
1167 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); | |
1168 if (!successful) { | |
1169 expand_bytes = 0; | |
1170 } else { | |
1171 expand_bytes -= HeapRegion::GrainBytes; | |
1172 // Expand the committed region. | |
1173 HeapWord* high = (HeapWord*) _g1_storage.high(); | |
1174 _g1_committed.set_end(high); | |
1175 // Create a new HeapRegion. | |
1176 MemRegion mr(base, high); | |
1177 bool is_zeroed = !_g1_max_committed.contains(base); | |
1178 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); | |
1179 | |
1180 // Now update max_committed if necessary. | |
1181 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); | |
1182 | |
1183 // Add it to the HeapRegionSeq. | |
1184 _hrs->insert(hr); | |
1185 // Set the zero-fill state, according to whether it's already | |
1186 // zeroed. | |
1187 { | |
1188 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
1189 if (is_zeroed) { | |
1190 hr->set_zero_fill_complete(); | |
1191 put_free_region_on_list_locked(hr); | |
1192 } else { | |
1193 hr->set_zero_fill_needed(); | |
1194 put_region_on_unclean_list_locked(hr); | |
1195 } | |
1196 } | |
1197 _free_regions++; | |
1198 // And we used up an expansion region to create it. | |
1199 _expansion_regions--; | |
1200 // Tell the cardtable about it. | |
1201 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1202 // And the offset table as well. | |
1203 _bot_shared->resize(_g1_committed.word_size()); | |
1204 } | |
1205 } | |
1206 if (Verbose && PrintGC) { | |
1207 size_t new_mem_size = _g1_storage.committed_size(); | |
1208 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", | |
1209 old_mem_size/K, aligned_expand_bytes/K, | |
1210 new_mem_size/K); | |
1211 } | |
1212 } | |
1213 | |
1214 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) | |
1215 { | |
1216 size_t old_mem_size = _g1_storage.committed_size(); | |
1217 size_t aligned_shrink_bytes = | |
1218 ReservedSpace::page_align_size_down(shrink_bytes); | |
1219 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1220 HeapRegion::GrainBytes); | |
1221 size_t num_regions_deleted = 0; | |
1222 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); | |
1223 | |
1224 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1225 if (mr.byte_size() > 0) | |
1226 _g1_storage.shrink_by(mr.byte_size()); | |
1227 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1228 | |
1229 _g1_committed.set_end(mr.start()); | |
1230 _free_regions -= num_regions_deleted; | |
1231 _expansion_regions += num_regions_deleted; | |
1232 | |
1233 // Tell the cardtable about it. | |
1234 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1235 | |
1236 // And the offset table as well. | |
1237 _bot_shared->resize(_g1_committed.word_size()); | |
1238 | |
1239 HeapRegionRemSet::shrink_heap(n_regions()); | |
1240 | |
1241 if (Verbose && PrintGC) { | |
1242 size_t new_mem_size = _g1_storage.committed_size(); | |
1243 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1244 old_mem_size/K, aligned_shrink_bytes/K, | |
1245 new_mem_size/K); | |
1246 } | |
1247 } | |
1248 | |
1249 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
1250 release_gc_alloc_regions(); | |
1251 tear_down_region_lists(); // We will rebuild them in a moment. | |
1252 shrink_helper(shrink_bytes); | |
1253 rebuild_region_lists(); | |
1254 } | |
1255 | |
1256 // Public methods. | |
1257 | |
1258 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1259 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1260 #endif // _MSC_VER | |
1261 | |
1262 | |
1263 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1264 SharedHeap(policy_), | |
1265 _g1_policy(policy_), | |
1266 _ref_processor(NULL), | |
1267 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1268 _bot_shared(NULL), | |
1269 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), | |
1270 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1271 _evac_failure_scan_stack(NULL) , | |
1272 _mark_in_progress(false), | |
1273 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), | |
1274 _cur_alloc_region(NULL), | |
1275 _refine_cte_cl(NULL), | |
1276 _free_region_list(NULL), _free_region_list_size(0), | |
1277 _free_regions(0), | |
1278 _popular_object_boundary(NULL), | |
1279 _cur_pop_hr_index(0), | |
1280 _popular_regions_to_be_evacuated(NULL), | |
1281 _pop_obj_rc_at_copy(), | |
1282 _full_collection(false), | |
1283 _unclean_region_list(), | |
1284 _unclean_regions_coming(false), | |
1285 _young_list(new YoungList(this)), | |
1286 _gc_time_stamp(0), | |
1287 _surviving_young_words(NULL) | |
1288 { | |
1289 _g1h = this; // To catch bugs. | |
1290 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1291 vm_exit_during_initialization("Failed necessary allocation."); | |
1292 } | |
1293 int n_queues = MAX2((int)ParallelGCThreads, 1); | |
1294 _task_queues = new RefToScanQueueSet(n_queues); | |
1295 | |
1296 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1297 assert(n_rem_sets > 0, "Invariant."); | |
1298 | |
1299 HeapRegionRemSetIterator** iter_arr = | |
1300 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1301 for (int i = 0; i < n_queues; i++) { | |
1302 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1303 } | |
1304 _rem_set_iterator = iter_arr; | |
1305 | |
1306 for (int i = 0; i < n_queues; i++) { | |
1307 RefToScanQueue* q = new RefToScanQueue(); | |
1308 q->initialize(); | |
1309 _task_queues->register_queue(i, q); | |
1310 } | |
1311 | |
1312 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
1313 _gc_alloc_regions[ap] = NULL; | |
1314 _gc_alloc_region_counts[ap] = 0; | |
1315 } | |
1316 guarantee(_task_queues != NULL, "task_queues allocation failure."); | |
1317 } | |
1318 | |
1319 jint G1CollectedHeap::initialize() { | |
1320 os::enable_vtime(); | |
1321 | |
1322 // Necessary to satisfy locking discipline assertions. | |
1323 | |
1324 MutexLocker x(Heap_lock); | |
1325 | |
1326 // While there are no constraints in the GC code that HeapWordSize | |
1327 // be any particular value, there are multiple other areas in the | |
1328 // system which believe this to be true (e.g. oop->object_size in some | |
1329 // cases incorrectly returns the size in wordSize units rather than | |
1330 // HeapWordSize). | |
1331 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1332 | |
1333 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1334 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1335 | |
1336 // Ensure that the sizes are properly aligned. | |
1337 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1338 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1339 | |
1340 // We allocate this in any case, but only do no work if the command line | |
1341 // param is off. | |
1342 _cg1r = new ConcurrentG1Refine(); | |
1343 | |
1344 // Reserve the maximum. | |
1345 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1346 // Includes the perm-gen. | |
1347 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), | |
1348 HeapRegion::GrainBytes, | |
1349 false /*ism*/); | |
1350 | |
1351 if (!heap_rs.is_reserved()) { | |
1352 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1353 return JNI_ENOMEM; | |
1354 } | |
1355 | |
1356 // It is important to do this in a way such that concurrent readers can't | |
1357 // temporarily think something is in the heap. (I've actually seen this
1358 // happen in asserts: DLD.) | |
1359 _reserved.set_word_size(0); | |
1360 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1361 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1362 | |
1363 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1364 | |
1365 _num_humongous_regions = 0; | |
1366 | |
1367 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1368 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1369 set_barrier_set(rem_set()->bs()); | |
1370 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1371 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1372 } else { | |
1373 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1374 return JNI_ENOMEM; | |
1375 } | |
1376 | |
1377 // Also create a G1 rem set. | |
1378 if (G1UseHRIntoRS) { | |
1379 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { | |
1380 _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
1381 } else { | |
1382 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); | |
1383 return JNI_ENOMEM; | |
1384 } | |
1385 } else { | |
1386 _g1_rem_set = new StupidG1RemSet(this); | |
1387 } | |
1388 | |
1389 // Carve out the G1 part of the heap. | |
1390 | |
1391 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1392 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1393 g1_rs.size()/HeapWordSize); | |
1394 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1395 | |
1396 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1397 | |
1398 _g1_storage.initialize(g1_rs, 0); | |
1399 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1400 _g1_max_committed = _g1_committed; | |
393 | 1401 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1402 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1403 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
1404 | |
1405 _bot_shared = new G1BlockOffsetSharedArray(_reserved, | |
1406 heap_word_size(init_byte_size)); | |
1407 | |
1408 _g1h = this; | |
1409 | |
1410 // Create the ConcurrentMark data structure and thread. | |
1411 // (Must do this late, so that "max_regions" is defined.) | |
1412 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1413 _cmThread = _cm->cmThread(); | |
1414 | |
1415 // ...and the concurrent zero-fill thread, if necessary. | |
1416 if (G1ConcZeroFill) { | |
1417 _czft = new ConcurrentZFThread(); | |
1418 } | |
1419 | |
1420 | |
1421 | |
1422 // Allocate the popular regions; take them off free lists. | |
1423 size_t pop_byte_size = G1NumPopularRegions * HeapRegion::GrainBytes; | |
1424 expand(pop_byte_size); | |
1425 _popular_object_boundary = | |
1426 _g1_reserved.start() + (G1NumPopularRegions * HeapRegion::GrainWords); | |
1427 for (int i = 0; i < G1NumPopularRegions; i++) { | |
1428 HeapRegion* hr = newAllocRegion(HeapRegion::GrainWords); | |
1429 // assert(hr != NULL && hr->bottom() < _popular_object_boundary, | |
1430 // "Should be enough, and all should be below boundary."); | |
1431 hr->set_popular(true); | |
1432 } | |
1433 assert(_cur_pop_hr_index == 0, "Start allocating at the first region."); | |
1434 | |
1435 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1436 HeapRegionRemSet::init_heap(max_regions()); | |
1437 | |
1438 // Now expand into the rest of the initial heap size. | |
1439 expand(init_byte_size - pop_byte_size); | |
1440 | |
1441 // Perform any initialization actions delegated to the policy. | |
1442 g1_policy()->init(); | |
1443 | |
1444 g1_policy()->note_start_of_mark_thread(); | |
1445 | |
1446 _refine_cte_cl = | |
1447 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1448 g1_rem_set(), | |
1449 concurrent_g1_refine()); | |
1450 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1451 | |
1452 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1453 SATB_Q_FL_lock, | |
1454 0, | |
1455 Shared_SATB_Q_lock); | |
1456 if (G1RSBarrierUseQueue) { | |
1457 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1458 DirtyCardQ_FL_lock, | |
1459 G1DirtyCardQueueMax, | |
1460 Shared_DirtyCardQ_lock); | |
1461 } | |
1462 // In case we're keeping closure specialization stats, initialize those | |
1463 // counts and that mechanism. | |
1464 SpecializationStats::clear(); | |
1465 | |
1466 _gc_alloc_region_list = NULL; | |
1467 | |
1468 // Do later initialization work for concurrent refinement. | |
1469 _cg1r->init(); | |
1470 | |
1471 const char* group_names[] = { "CR", "ZF", "CM", "CL" }; | |
1472 GCOverheadReporter::initGCOverheadReporter(4, group_names); | |
1473 | |
1474 return JNI_OK; | |
1475 } | |
1476 | |
1477 void G1CollectedHeap::ref_processing_init() { | |
1478 SharedHeap::ref_processing_init(); | |
1479 MemRegion mr = reserved_region(); | |
1480 _ref_processor = ReferenceProcessor::create_ref_processor( | |
1481 mr, // span | |
1482 false, // Reference discovery is not atomic | |
1483 // (though it shouldn't matter here.) | |
1484 true, // mt_discovery | |
1485 NULL, // is alive closure: need to fill this in for efficiency | |
1486 ParallelGCThreads, | |
1487 ParallelRefProcEnabled, | |
1488 true); // Setting next fields of discovered | |
1489 // lists requires a barrier. | |
1490 } | |
1491 | |
1492 size_t G1CollectedHeap::capacity() const { | |
1493 return _g1_committed.byte_size(); | |
1494 } | |
1495 | |
1496 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent, | |
1497 int worker_i) { | |
1498 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
1499 int n_completed_buffers = 0; | |
1500 while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) { | |
1501 n_completed_buffers++; | |
1502 } | |
1503 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
1504 (double) n_completed_buffers); | |
1505 dcqs.clear_n_completed_buffers(); | |
1506 // Finish up the queue... | |
1507 if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i, | |
1508 g1_rem_set()); | |
1509 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
1510 } | |
1511 | |
1512 | |
1513 // Computes the sum of the storage used by the various regions. | |
1514 | |
1515 size_t G1CollectedHeap::used() const { | |
1516 assert(Heap_lock->owner() != NULL, | |
1517 "Should be owned on this thread's behalf."); | |
1518 size_t result = _summary_bytes_used; | |
1519 if (_cur_alloc_region != NULL) | |
1520 result += _cur_alloc_region->used(); | |
1521 return result; | |
1522 } | |
1523 | |
1524 class SumUsedClosure: public HeapRegionClosure { | |
1525 size_t _used; | |
1526 public: | |
1527 SumUsedClosure() : _used(0) {} | |
1528 bool doHeapRegion(HeapRegion* r) { | |
1529 if (!r->continuesHumongous()) { | |
1530 _used += r->used(); | |
1531 } | |
1532 return false; | |
1533 } | |
1534 size_t result() { return _used; } | |
1535 }; | |
1536 | |
1537 size_t G1CollectedHeap::recalculate_used() const { | |
1538 SumUsedClosure blk; | |
1539 _hrs->iterate(&blk); | |
1540 return blk.result(); | |
1541 } | |
1542 | |
1543 #ifndef PRODUCT | |
1544 class SumUsedRegionsClosure: public HeapRegionClosure { | |
1545 size_t _num; | |
1546 public: | |
1547 // _num starts at G1NumPopularRegions to account for the popular regions | |
1548 SumUsedRegionsClosure() : _num(G1NumPopularRegions) {} | |
1549 bool doHeapRegion(HeapRegion* r) { | |
1550 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
1551 _num += 1; | |
1552 } | |
1553 return false; | |
1554 } | |
1555 size_t result() { return _num; } | |
1556 }; | |
1557 | |
1558 size_t G1CollectedHeap::recalculate_used_regions() const { | |
1559 SumUsedRegionsClosure blk; | |
1560 _hrs->iterate(&blk); | |
1561 return blk.result(); | |
1562 } | |
1563 #endif // PRODUCT | |
1564 | |
1565 size_t G1CollectedHeap::unsafe_max_alloc() { | |
1566 if (_free_regions > 0) return HeapRegion::GrainBytes; | |
1567 // otherwise, is there space in the current allocation region? | |
1568 | |
1569 // We need to store the current allocation region in a local variable | |
1570 // here. The problem is that this method doesn't take any locks and | |
1571 // there may be other threads which overwrite the current allocation | |
1572 // region field. attempt_allocation(), for example, sets it to NULL | |
1573 // and this can happen *after* the NULL check here but before the call | |
1574 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
1575 // to be a problem in the optimized build, since the two loads of the | |
1576 // current allocation region field are optimized away. | |
1577 HeapRegion* car = _cur_alloc_region; | |
1578 | |
1579 // FIXME: should iterate over all regions? | |
1580 if (car == NULL) { | |
1581 return 0; | |
1582 } | |
1583 return car->free(); | |
1584 } | |
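The comment above is the reason for the local variable: the shared field can be cleared by another thread between the NULL check and the dereference. A minimal sketch of the two patterns, reusing the names from the function above (illustrative only, not part of the file):

    // Racy pattern: two separate loads of the shared field; the second load
    // may observe NULL after attempt_allocation() clears it, and then fault.
    if (_cur_alloc_region != NULL)
      return _cur_alloc_region->free();      // unsafe

    // Safe pattern: load the field once into a local and use only the local
    // copy, which is what the code above does.
    HeapRegion* car = _cur_alloc_region;
    return (car == NULL) ? 0 : car->free();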
1585 | |
1586 void G1CollectedHeap::collect(GCCause::Cause cause) { | |
1587 // The caller doesn't have the Heap_lock | |
1588 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); | |
1589 MutexLocker ml(Heap_lock); | |
1590 collect_locked(cause); | |
1591 } | |
1592 | |
1593 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { | |
1594 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
1595 assert(Heap_lock->is_locked(), "Precondition#2"); | |
1596 GCCauseSetter gcs(this, cause); | |
1597 switch (cause) { | |
1598 case GCCause::_heap_inspection: | |
1599 case GCCause::_heap_dump: { | |
1600 HandleMark hm; | |
1601 do_full_collection(false); // don't clear all soft refs | |
1602 break; | |
1603 } | |
1604 default: // XXX FIX ME | |
1605 ShouldNotReachHere(); // Unexpected use of this function | |
1606 } | |
1607 } | |
1608 | |
1609 | |
1610 void G1CollectedHeap::collect_locked(GCCause::Cause cause) { | |
1611 // Don't want to do a GC until cleanup is completed. | |
1612 wait_for_cleanup_complete(); | |
1613 | |
1614 // Read the GC count while holding the Heap_lock | |
1615 int gc_count_before = SharedHeap::heap()->total_collections(); | |
1616 { | |
1617 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
1618 VM_G1CollectFull op(gc_count_before, cause); | |
1619 VMThread::execute(&op); | |
1620 } | |
1621 } | |
1622 | |
1623 bool G1CollectedHeap::is_in(const void* p) const { | |
1624 if (_g1_committed.contains(p)) { | |
1625 HeapRegion* hr = _hrs->addr_to_region(p); | |
1626 return hr->is_in(p); | |
1627 } else { | |
1628 return _perm_gen->as_gen()->is_in(p); | |
1629 } | |
1630 } | |
1631 | |
1632 // Iteration functions. | |
1633 | |
1634 // Iterates an OopClosure over all ref-containing fields of objects | |
1635 // within a HeapRegion. | |
1636 | |
1637 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
1638 MemRegion _mr; | |
1639 OopClosure* _cl; | |
1640 public: | |
1641 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
1642 : _mr(mr), _cl(cl) {} | |
1643 bool doHeapRegion(HeapRegion* r) { | |
1644 if (! r->continuesHumongous()) { | |
1645 r->oop_iterate(_cl); | |
1646 } | |
1647 return false; | |
1648 } | |
1649 }; | |
1650 | |
1651 void G1CollectedHeap::oop_iterate(OopClosure* cl) { | |
1652 IterateOopClosureRegionClosure blk(_g1_committed, cl); | |
1653 _hrs->iterate(&blk); | |
1654 } | |
1655 | |
1656 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) { | |
1657 IterateOopClosureRegionClosure blk(mr, cl); | |
1658 _hrs->iterate(&blk); | |
1659 } | |
1660 | |
1661 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
1662 | |
1663 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
1664 ObjectClosure* _cl; | |
1665 public: | |
1666 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
1667 bool doHeapRegion(HeapRegion* r) { | |
1668 if (! r->continuesHumongous()) { | |
1669 r->object_iterate(_cl); | |
1670 } | |
1671 return false; | |
1672 } | |
1673 }; | |
1674 | |
1675 void G1CollectedHeap::object_iterate(ObjectClosure* cl) { | |
1676 IterateObjectClosureRegionClosure blk(cl); | |
1677 _hrs->iterate(&blk); | |
1678 } | |
1679 | |
1680 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
1681 // FIXME: is this right? | |
1682 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
1683 } | |
1684 | |
1685 // Calls a SpaceClosure on a HeapRegion. | |
1686 | |
1687 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
1688 SpaceClosure* _cl; | |
1689 public: | |
1690 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
1691 bool doHeapRegion(HeapRegion* r) { | |
1692 _cl->do_space(r); | |
1693 return false; | |
1694 } | |
1695 }; | |
1696 | |
1697 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
1698 SpaceClosureRegionClosure blk(cl); | |
1699 _hrs->iterate(&blk); | |
1700 } | |
1701 | |
1702 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { | |
1703 _hrs->iterate(cl); | |
1704 } | |
1705 | |
1706 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
1707 HeapRegionClosure* cl) { | |
1708 _hrs->iterate_from(r, cl); | |
1709 } | |
1710 | |
1711 void | |
1712 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { | |
1713 _hrs->iterate_from(idx, cl); | |
1714 } | |
1715 | |
1716 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } | |
1717 | |
1718 void | |
1719 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
1720 int worker, | |
1721 jint claim_value) { | |
355 | 1722 const size_t regions = n_regions(); |
1723 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); | |
1724 // try to spread out the starting points of the workers | |
1725 const size_t start_index = regions / worker_num * (size_t) worker; | |
1726 | |
1727 // each worker will actually look at all regions | |
1728 for (size_t count = 0; count < regions; ++count) { | |
1729 const size_t index = (start_index + count) % regions; | |
1730 assert(0 <= index && index < regions, "sanity"); | |
1731 HeapRegion* r = region_at(index); | |
1732 // we'll ignore "continues humongous" regions (we'll process them | |
1733 // when we come across their corresponding "start humongous" | |
1734 // region) and regions already claimed | |
1735 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
1736 continue; | |
1737 } | |
1738 // OK, try to claim it | |
342 | 1739 if (r->claimHeapRegion(claim_value)) { |
355 | 1740 // success! |
1741 assert(!r->continuesHumongous(), "sanity"); | |
1742 if (r->startsHumongous()) { | |
1743 // If the region is "starts humongous" we'll iterate over its | |
1744 // "continues humongous" first; in fact we'll do them | |
1745 // first. The order is important. In one case, calling the | |
1746 // closure on the "starts humongous" region might de-allocate | |
1747 // and clear all its "continues humongous" regions and, as a | |
1748 // result, we might end up processing them twice. So, we'll do | |
1749 // them first (notice: most closures will ignore them anyway) and | |
1750 // then we'll do the "starts humongous" region. | |
1751 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
1752 HeapRegion* chr = region_at(ch_index); | |
1753 | |
1754 // if the region has already been claimed or it's not | |
1755 // "continues humongous" we're done | |
1756 if (chr->claim_value() == claim_value || | |
1757 !chr->continuesHumongous()) { | |
1758 break; | |
1759 } | |
1760 | |
1761 // No one should have claimed it directly, given | |
1762 // that we claimed its "starts humongous" region. | |
1763 assert(chr->claim_value() != claim_value, "sanity"); | |
1764 assert(chr->humongous_start_region() == r, "sanity"); | |
1765 | |
1766 if (chr->claimHeapRegion(claim_value)) { | |
1767 // we should always be able to claim it; no one else should | |
1768 // be trying to claim this region | |
1769 | |
1770 bool res2 = cl->doHeapRegion(chr); | |
1771 assert(!res2, "Should not abort"); | |
1772 | |
1773 // Right now, this holds (i.e., no closure that actually | |
1774 // does something with "continues humongous" regions | |
1775 // clears them). We might have to weaken it in the future, | |
1776 // but let's leave these two asserts here for extra safety. | |
1777 assert(chr->continuesHumongous(), "should still be the case"); | |
1778 assert(chr->humongous_start_region() == r, "sanity"); | |
1779 } else { | |
1780 guarantee(false, "we should not reach here"); | |
1781 } | |
1782 } | |
1783 } | |
1784 | |
1785 assert(!r->continuesHumongous(), "sanity"); | |
1786 bool res = cl->doHeapRegion(r); | |
1787 assert(!res, "Should not abort"); | |
1788 } | |
1789 } | |
1790 } | |
1791 | |
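A worked example of the start-index spreading above, with made-up numbers: with regions = 10 and worker_num = 4, regions / worker_num is 2, so the starting indices are 0, 2, 4 and 6.

    // worker 0 visits indices 0,1,2,...,9
    // worker 1 visits indices 2,3,...,9,0,1
    // worker 2 visits indices 4,5,...,9,0,1,2,3
    // worker 3 visits indices 6,7,...,9,0,1,2,3,4,5
    // Every worker visits every index, but claimHeapRegion() ensures each
    // region is processed by exactly one of them.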
390 | 1792 class ResetClaimValuesClosure: public HeapRegionClosure { |
1793 public: | |
1794 bool doHeapRegion(HeapRegion* r) { | |
1795 r->set_claim_value(HeapRegion::InitialClaimValue); | |
1796 return false; | |
1797 } | |
1798 }; | |
1799 | |
1800 void | |
1801 G1CollectedHeap::reset_heap_region_claim_values() { | |
1802 ResetClaimValuesClosure blk; | |
1803 heap_region_iterate(&blk); | |
1804 } | |
1805 | |
355 | 1806 #ifdef ASSERT |
1807 // This checks whether all regions in the heap have the correct claim | |
1808 // value. I also piggy-backed a check on this to ensure that the | |
1809 // humongous_start_region() information on "continues humongous" | |
1810 // regions is correct. | |
1811 | |
1812 class CheckClaimValuesClosure : public HeapRegionClosure { | |
1813 private: | |
1814 jint _claim_value; | |
1815 size_t _failures; | |
1816 HeapRegion* _sh_region; | |
1817 public: | |
1818 CheckClaimValuesClosure(jint claim_value) : | |
1819 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } | |
1820 bool doHeapRegion(HeapRegion* r) { | |
1821 if (r->claim_value() != _claim_value) { | |
1822 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
1823 "claim value = %d, should be %d", | |
1824 r->bottom(), r->end(), r->claim_value(), | |
1825 _claim_value); | |
1826 ++_failures; | |
1827 } | |
1828 if (!r->isHumongous()) { | |
1829 _sh_region = NULL; | |
1830 } else if (r->startsHumongous()) { | |
1831 _sh_region = r; | |
1832 } else if (r->continuesHumongous()) { | |
1833 if (r->humongous_start_region() != _sh_region) { | |
1834 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
1835 "HS = "PTR_FORMAT", should be "PTR_FORMAT, | |
1836 r->bottom(), r->end(), | |
1837 r->humongous_start_region(), | |
1838 _sh_region); | |
1839 ++_failures; | |
342 | 1840 } |
1841 } | |
355 | 1842 return false; |
1843 } | |
1844 size_t failures() { | |
1845 return _failures; | |
1846 } | |
1847 }; | |
1848 | |
1849 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
1850 CheckClaimValuesClosure cl(claim_value); | |
1851 heap_region_iterate(&cl); | |
1852 return cl.failures() == 0; | |
1853 } | |
1854 #endif // ASSERT | |
342 | 1855 |
1856 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
1857 HeapRegion* r = g1_policy()->collection_set(); | |
1858 while (r != NULL) { | |
1859 HeapRegion* next = r->next_in_collection_set(); | |
1860 if (cl->doHeapRegion(r)) { | |
1861 cl->incomplete(); | |
1862 return; | |
1863 } | |
1864 r = next; | |
1865 } | |
1866 } | |
1867 | |
1868 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
1869 HeapRegionClosure *cl) { | |
1870 assert(r->in_collection_set(), | |
1871 "Start region must be a member of the collection set."); | |
1872 HeapRegion* cur = r; | |
1873 while (cur != NULL) { | |
1874 HeapRegion* next = cur->next_in_collection_set(); | |
1875 if (cl->doHeapRegion(cur) && false) { | |
1876 cl->incomplete(); | |
1877 return; | |
1878 } | |
1879 cur = next; | |
1880 } | |
1881 cur = g1_policy()->collection_set(); | |
1882 while (cur != r) { | |
1883 HeapRegion* next = cur->next_in_collection_set(); | |
1884 if (cl->doHeapRegion(cur) && false) { | |
1885 cl->incomplete(); | |
1886 return; | |
1887 } | |
1888 cur = next; | |
1889 } | |
1890 } | |
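The two loops above treat the collection set as a circular list rooted at r: the first loop runs from r to the tail, the second from the head up to (but not including) r. With made-up region names, if the collection set is A -> B -> C -> D and r is C, the visit order is C, D, A, B.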
1891 | |
1892 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
1893 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
1894 } | |
1895 | |
1896 | |
1897 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
1898 Space* res = heap_region_containing(addr); | |
1899 if (res == NULL) | |
1900 res = perm_gen()->space_containing(addr); | |
1901 return res; | |
1902 } | |
1903 | |
1904 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
1905 Space* sp = space_containing(addr); | |
1906 if (sp != NULL) { | |
1907 return sp->block_start(addr); | |
1908 } | |
1909 return NULL; | |
1910 } | |
1911 | |
1912 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
1913 Space* sp = space_containing(addr); | |
1914 assert(sp != NULL, "block_size of address outside of heap"); | |
1915 return sp->block_size(addr); | |
1916 } | |
1917 | |
1918 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
1919 Space* sp = space_containing(addr); | |
1920 return sp->block_is_obj(addr); | |
1921 } | |
1922 | |
1923 bool G1CollectedHeap::supports_tlab_allocation() const { | |
1924 return true; | |
1925 } | |
1926 | |
1927 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
1928 return HeapRegion::GrainBytes; | |
1929 } | |
1930 | |
1931 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
1932 // Return the remaining space in the cur alloc region, but not less than | |
1933 // the min TLAB size. | |
1934 // Also, no more than half the region size, since we can't allow tlabs to | |
1935 // grow big enough to accommodate humongous objects. | |
1936 | |
1937 // We need to store it locally, since it might change between when we | |
1938 // test for NULL and when we use it later. | |
1939 ContiguousSpace* cur_alloc_space = _cur_alloc_region; | |
1940 if (cur_alloc_space == NULL) { | |
1941 return HeapRegion::GrainBytes/2; | |
1942 } else { | |
1943 return MAX2(MIN2(cur_alloc_space->free(), | |
1944 (size_t)(HeapRegion::GrainBytes/2)), | |
1945 (size_t)MinTLABSize); | |
1946 } | |
1947 } | |
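A worked example of the clamp above, with made-up sizes: suppose GrainBytes is 1M and MinTLABSize is 2K. With 300K free in the current alloc region the result is MAX2(MIN2(300K, 512K), 2K) = 300K; with 800K free it is capped at 512K (half a region, so a TLAB can never hold a humongous object); with only 1K free it is bumped up to the 2K minimum.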
1948 | |
1949 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { | |
1950 bool dummy; | |
1951 return G1CollectedHeap::mem_allocate(size, false, true, &dummy); | |
1952 } | |
1953 | |
1954 bool G1CollectedHeap::allocs_are_zero_filled() { | |
1955 return false; | |
1956 } | |
1957 | |
1958 size_t G1CollectedHeap::large_typearray_limit() { | |
1959 // FIXME | |
1960 return HeapRegion::GrainBytes/HeapWordSize; | |
1961 } | |
1962 | |
1963 size_t G1CollectedHeap::max_capacity() const { | |
1964 return _g1_committed.byte_size(); | |
1965 } | |
1966 | |
1967 jlong G1CollectedHeap::millis_since_last_gc() { | |
1968 // assert(false, "NYI"); | |
1969 return 0; | |
1970 } | |
1971 | |
1972 | |
1973 void G1CollectedHeap::prepare_for_verify() { | |
1974 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
1975 ensure_parsability(false); | |
1976 } | |
1977 g1_rem_set()->prepare_for_verify(); | |
1978 } | |
1979 | |
1980 class VerifyLivenessOopClosure: public OopClosure { | |
1981 G1CollectedHeap* g1h; | |
1982 public: | |
1983 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
1984 g1h = _g1h; | |
1985 } | |
1986 void do_oop(narrowOop *p) { | |
1987 guarantee(false, "NYI"); | |
1988 } | |
1989 void do_oop(oop *p) { | |
1990 oop obj = *p; | |
1991 assert(obj == NULL || !g1h->is_obj_dead(obj), | |
1992 "Dead object referenced by a not dead object"); | |
1993 } | |
1994 }; | |
1995 | |
1996 class VerifyObjsInRegionClosure: public ObjectClosure { | |
1997 G1CollectedHeap* _g1h; | |
1998 size_t _live_bytes; | |
1999 HeapRegion *_hr; | |
2000 public: | |
2001 VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) { | |
2002 _g1h = G1CollectedHeap::heap(); | |
2003 } | |
2004 void do_object(oop o) { | |
2005 VerifyLivenessOopClosure isLive(_g1h); | |
2006 assert(o != NULL, "Huh?"); | |
2007 if (!_g1h->is_obj_dead(o)) { | |
2008 o->oop_iterate(&isLive); | |
2009 if (!_hr->obj_allocated_since_prev_marking(o)) | |
2010 _live_bytes += (o->size() * HeapWordSize); | |
2011 } | |
2012 } | |
2013 size_t live_bytes() { return _live_bytes; } | |
2014 }; | |
2015 | |
2016 class PrintObjsInRegionClosure : public ObjectClosure { | |
2017 HeapRegion *_hr; | |
2018 G1CollectedHeap *_g1; | |
2019 public: | |
2020 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2021 _g1 = G1CollectedHeap::heap(); | |
2022 }; | |
2023 | |
2024 void do_object(oop o) { | |
2025 if (o != NULL) { | |
2026 HeapWord *start = (HeapWord *) o; | |
2027 size_t word_sz = o->size(); | |
2028 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2029 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2030 (void*) o, word_sz, | |
2031 _g1->isMarkedPrev(o), | |
2032 _g1->isMarkedNext(o), | |
2033 _hr->obj_allocated_since_prev_marking(o)); | |
2034 HeapWord *end = start + word_sz; | |
2035 HeapWord *cur; | |
2036 int *val; | |
2037 for (cur = start; cur < end; cur++) { | |
2038 val = (int *) cur; | |
2039 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2040 } | |
2041 } | |
2042 } | |
2043 }; | |
2044 | |
2045 class VerifyRegionClosure: public HeapRegionClosure { | |
2046 public: | |
2047 bool _allow_dirty; | |
390 | 2048 bool _par; |
2049 VerifyRegionClosure(bool allow_dirty, bool par = false) | |
2050 : _allow_dirty(allow_dirty), _par(par) {} | |
342 | 2051 bool doHeapRegion(HeapRegion* r) { |
390 | 2052 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2053 "Should be unclaimed at verify points."); | |
342 | 2054 if (r->isHumongous()) { |
2055 if (r->startsHumongous()) { | |
2056 // Verify the single H object. | |
2057 oop(r->bottom())->verify(); | |
2058 size_t word_sz = oop(r->bottom())->size(); | |
2059 guarantee(r->top() == r->bottom() + word_sz, | |
2060 "Only one object in a humongous region"); | |
2061 } | |
2062 } else { | |
2063 VerifyObjsInRegionClosure not_dead_yet_cl(r); | |
2064 r->verify(_allow_dirty); | |
2065 r->object_iterate(¬_dead_yet_cl); | |
2066 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(), | |
2067 "More live objects than counted in last complete marking."); | |
2068 } | |
2069 return false; | |
2070 } | |
2071 }; | |
2072 | |
2073 class VerifyRootsClosure: public OopsInGenClosure { | |
2074 private: | |
2075 G1CollectedHeap* _g1h; | |
2076 bool _failures; | |
2077 | |
2078 public: | |
2079 VerifyRootsClosure() : | |
2080 _g1h(G1CollectedHeap::heap()), _failures(false) { } | |
2081 | |
2082 bool failures() { return _failures; } | |
2083 | |
2084 void do_oop(narrowOop* p) { | |
2085 guarantee(false, "NYI"); | |
2086 } | |
2087 | |
2088 void do_oop(oop* p) { | |
2089 oop obj = *p; | |
2090 if (obj != NULL) { | |
2091 if (_g1h->is_obj_dead(obj)) { | |
2092 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " | |
2093 "points to dead obj "PTR_FORMAT, p, (void*) obj); | |
2094 obj->print_on(gclog_or_tty); | |
2095 _failures = true; | |
2096 } | |
2097 } | |
2098 } | |
2099 }; | |
2100 | |
390 | 2101 // This is the task used for parallel heap verification. |
2102 | |
2103 class G1ParVerifyTask: public AbstractGangTask { | |
2104 private: | |
2105 G1CollectedHeap* _g1h; | |
2106 bool _allow_dirty; | |
2107 | |
2108 public: | |
2109 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) : | |
2110 AbstractGangTask("Parallel verify task"), | |
2111 _g1h(g1h), _allow_dirty(allow_dirty) { } | |
2112 | |
2113 void work(int worker_i) { | |
2114 VerifyRegionClosure blk(_allow_dirty, true); | |
2115 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, | |
2116 HeapRegion::ParVerifyClaimValue); | |
2117 } | |
2118 }; | |
2119 | |
342 | 2120 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
2121 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2122 if (!silent) { gclog_or_tty->print("roots "); } | |
2123 VerifyRootsClosure rootsCl; | |
2124 process_strong_roots(false, | |
2125 SharedHeap::SO_AllClasses, | |
2126 &rootsCl, | |
2127 &rootsCl); | |
2128 rem_set()->invalidate(perm_gen()->used_region(), false); | |
2129 if (!silent) { gclog_or_tty->print("heapRegions "); } | |
390 | 2130 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2131 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2132 "sanity check"); | |
2133 | |
2134 G1ParVerifyTask task(this, allow_dirty); | |
2135 int n_workers = workers()->total_workers(); | |
2136 set_par_threads(n_workers); | |
2137 workers()->run_task(&task); | |
2138 set_par_threads(0); | |
2139 | |
2140 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2141 "sanity check"); | |
2142 | |
2143 reset_heap_region_claim_values(); | |
2144 | |
2145 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2146 "sanity check"); | |
2147 } else { | |
2148 VerifyRegionClosure blk(allow_dirty); | |
2149 _hrs->iterate(&blk); | |
2150 } | |
342 | 2151 if (!silent) gclog_or_tty->print("remset "); |
2152 rem_set()->verify(); | |
2153 guarantee(!rootsCl.failures(), "should not have had failures"); | |
2154 } else { | |
2155 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2156 } | |
2157 } | |
2158 | |
2159 class PrintRegionClosure: public HeapRegionClosure { | |
2160 outputStream* _st; | |
2161 public: | |
2162 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2163 bool doHeapRegion(HeapRegion* r) { | |
2164 r->print_on(_st); | |
2165 return false; | |
2166 } | |
2167 }; | |
2168 | |
2169 void G1CollectedHeap::print() const { print_on(gclog_or_tty); } | |
2170 | |
2171 void G1CollectedHeap::print_on(outputStream* st) const { | |
2172 PrintRegionClosure blk(st); | |
2173 _hrs->iterate(&blk); | |
2174 } | |
2175 | |
2176 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
2177 if (ParallelGCThreads > 0) { | |
2178 workers()->print_worker_threads(); | |
2179 } | |
2180 st->print("\"G1 concurrent mark GC Thread\" "); | |
2181 _cmThread->print(); | |
2182 st->cr(); | |
2183 st->print("\"G1 concurrent refinement GC Thread\" "); | |
2184 _cg1r->cg1rThread()->print_on(st); | |
2185 st->cr(); | |
2186 st->print("\"G1 zero-fill GC Thread\" "); | |
2187 _czft->print_on(st); | |
2188 st->cr(); | |
2189 } | |
2190 | |
2191 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
2192 if (ParallelGCThreads > 0) { | |
2193 workers()->threads_do(tc); | |
2194 } | |
2195 tc->do_thread(_cmThread); | |
2196 tc->do_thread(_cg1r->cg1rThread()); | |
2197 tc->do_thread(_czft); | |
2198 } | |
2199 | |
2200 void G1CollectedHeap::print_tracing_info() const { | |
2201 concurrent_g1_refine()->print_final_card_counts(); | |
2202 | |
2203 // We'll overload this to mean "trace GC pause statistics." | |
2204 if (TraceGen0Time || TraceGen1Time) { | |
2205 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2206 // to that. | |
2207 g1_policy()->print_tracing_info(); | |
2208 } | |
2209 if (SummarizeG1RSStats) { | |
2210 g1_rem_set()->print_summary_info(); | |
2211 } | |
2212 if (SummarizeG1ConcMark) { | |
2213 concurrent_mark()->print_summary_info(); | |
2214 } | |
2215 if (SummarizeG1ZFStats) { | |
2216 ConcurrentZFThread::print_summary_info(); | |
2217 } | |
2218 if (G1SummarizePopularity) { | |
2219 print_popularity_summary_info(); | |
2220 } | |
2221 g1_policy()->print_yg_surv_rate_info(); | |
2222 | |
2223 GCOverheadReporter::printGCOverhead(); | |
2224 | |
2225 SpecializationStats::print(); | |
2226 } | |
2227 | |
2228 | |
2229 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2230 HeapRegion* hr = heap_region_containing(addr); | |
2231 if (hr == NULL) { | |
2232 return 0; | |
2233 } else { | |
2234 return 1; | |
2235 } | |
2236 } | |
2237 | |
2238 G1CollectedHeap* G1CollectedHeap::heap() { | |
2239 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
2240 "not a garbage-first heap"); | |
2241 return _g1h; | |
2242 } | |
2243 | |
2244 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
2245 if (PrintHeapAtGC){ | |
2246 gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections()); | |
2247 Universe::print(); | |
2248 } | |
2249 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); | |
2250 // Call allocation profiler | |
2251 AllocationProfiler::iterate_since_last_gc(); | |
2252 // Fill TLAB's and such | |
2253 ensure_parsability(true); | |
2254 } | |
2255 | |
2256 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
2257 // FIXME: what is this about? | |
2258 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
2259 // is set. | |
2260 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
2261 "derived pointer present")); | |
2262 | |
2263 if (PrintHeapAtGC){ | |
2264 gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections()); | |
2265 Universe::print(); | |
2266 gclog_or_tty->print("} "); | |
2267 } | |
2268 } | |
2269 | |
2270 void G1CollectedHeap::do_collection_pause() { | |
2271 // Read the GC count while holding the Heap_lock | |
2272 // we need to do this _before_ wait_for_cleanup_complete(), to | |
2273 // ensure that we do not give up the heap lock and potentially | |
2274 // pick up the wrong count | |
2275 int gc_count_before = SharedHeap::heap()->total_collections(); | |
2276 | |
2277 // Don't want to do a GC pause while cleanup is being completed! | |
2278 wait_for_cleanup_complete(); | |
2279 | |
2280 g1_policy()->record_stop_world_start(); | |
2281 { | |
2282 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
2283 VM_G1IncCollectionPause op(gc_count_before); | |
2284 VMThread::execute(&op); | |
2285 } | |
2286 } | |
2287 | |
2288 void | |
2289 G1CollectedHeap::doConcurrentMark() { | |
2290 if (G1ConcMark) { | |
2291 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2292 if (!_cmThread->in_progress()) { | |
2293 _cmThread->set_started(); | |
2294 CGC_lock->notify(); | |
2295 } | |
2296 } | |
2297 } | |
2298 | |
2299 class VerifyMarkedObjsClosure: public ObjectClosure { | |
2300 G1CollectedHeap* _g1h; | |
2301 public: | |
2302 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
2303 void do_object(oop obj) { | |
2304 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
2305 "markandsweep mark should agree with concurrent deadness"); | |
2306 } | |
2307 }; | |
2308 | |
2309 void | |
2310 G1CollectedHeap::checkConcurrentMark() { | |
2311 VerifyMarkedObjsClosure verifycl(this); | |
2312 doConcurrentMark(); | |
2313 // MutexLockerEx x(getMarkBitMapLock(), | |
2314 // Mutex::_no_safepoint_check_flag); | |
2315 object_iterate(&verifycl); | |
2316 } | |
2317 | |
2318 void G1CollectedHeap::do_sync_mark() { | |
2319 _cm->checkpointRootsInitial(); | |
2320 _cm->markFromRoots(); | |
2321 _cm->checkpointRootsFinal(false); | |
2322 } | |
2323 | |
2324 // <NEW PREDICTION> | |
2325 | |
2326 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
2327 bool young) { | |
2328 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
2329 } | |
2330 | |
2331 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
2332 predicted_time_ms) { | |
2333 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
2334 } | |
2335 | |
2336 size_t G1CollectedHeap::pending_card_num() { | |
2337 size_t extra_cards = 0; | |
2338 JavaThread *curr = Threads::first(); | |
2339 while (curr != NULL) { | |
2340 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
2341 extra_cards += dcq.size(); | |
2342 curr = curr->next(); | |
2343 } | |
2344 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2345 size_t buffer_size = dcqs.buffer_size(); | |
2346 size_t buffer_num = dcqs.completed_buffers_num(); | |
2347 return buffer_size * buffer_num + extra_cards; | |
2348 } | |
2349 | |
2350 size_t G1CollectedHeap::max_pending_card_num() { | |
2351 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2352 size_t buffer_size = dcqs.buffer_size(); | |
2353 size_t buffer_num = dcqs.completed_buffers_num(); | |
2354 int thread_num = Threads::number_of_threads(); | |
2355 return (buffer_num + thread_num) * buffer_size; | |
2356 } | |
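A worked example of the two estimates above, with made-up numbers: if buffer_size is 256 cards, 10 buffers have been completed, there are 8 Java threads, and their private queues currently hold 40 cards in total, then pending_card_num() returns 256 * 10 + 40 = 2600, while max_pending_card_num() pessimistically assumes every thread's partially filled buffer could be full and returns (10 + 8) * 256 = 4608.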
2357 | |
2358 size_t G1CollectedHeap::cards_scanned() { | |
2359 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); | |
2360 return g1_rset->cardsScanned(); | |
2361 } | |
2362 | |
2363 void | |
2364 G1CollectedHeap::setup_surviving_young_words() { | |
2365 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
2366 size_t array_length = g1_policy()->young_cset_length(); | |
2367 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
2368 if (_surviving_young_words == NULL) { | |
2369 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
2370 "Not enough space for young surv words summary."); | |
2371 } | |
2372 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
2373 for (size_t i = 0; i < array_length; ++i) { | |
2374 guarantee( _surviving_young_words[i] == 0, "invariant" ); | |
2375 } | |
2376 } | |
2377 | |
2378 void | |
2379 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
2380 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2381 size_t array_length = g1_policy()->young_cset_length(); | |
2382 for (size_t i = 0; i < array_length; ++i) | |
2383 _surviving_young_words[i] += surv_young_words[i]; | |
2384 } | |
2385 | |
2386 void | |
2387 G1CollectedHeap::cleanup_surviving_young_words() { | |
2388 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
2389 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
2390 _surviving_young_words = NULL; | |
2391 } | |
2392 | |
2393 // </NEW PREDICTION> | |
2394 | |
2395 void | |
2396 G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) { | |
2397 char verbose_str[128]; | |
2398 sprintf(verbose_str, "GC pause "); | |
2399 if (popular_region != NULL) | |
2400 strcat(verbose_str, "(popular)"); | |
2401 else if (g1_policy()->in_young_gc_mode()) { | |
2402 if (g1_policy()->full_young_gcs()) | |
2403 strcat(verbose_str, "(young)"); | |
2404 else | |
2405 strcat(verbose_str, "(partial)"); | |
2406 } | |
2407 bool reset_should_initiate_conc_mark = false; | |
2408 if (popular_region != NULL && g1_policy()->should_initiate_conc_mark()) { | |
2409 // we currently do not allow an initial mark phase to be piggy-backed | |
2410 // on a popular pause | |
2411 reset_should_initiate_conc_mark = true; | |
2412 g1_policy()->unset_should_initiate_conc_mark(); | |
2413 } | |
2414 if (g1_policy()->should_initiate_conc_mark()) | |
2415 strcat(verbose_str, " (initial-mark)"); | |
2416 | |
2417 GCCauseSetter x(this, (popular_region == NULL ? | |
2418 GCCause::_g1_inc_collection_pause : | |
2419 GCCause::_g1_pop_region_collection_pause)); | |
2420 | |
2421 // if PrintGCDetails is on, we'll print long statistics information | |
2422 // in the collector policy code, so let's not print this as the output | |
2423 // is messy if we do. | |
2424 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
2425 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
2426 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); | |
2427 | |
2428 ResourceMark rm; | |
2429 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
2430 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
2431 guarantee(!is_gc_active(), "collection is not reentrant"); | |
2432 assert(regions_accounted_for(), "Region leakage!"); | |
353 | 2433 |
2434 increment_gc_time_stamp(); | |
342 | 2435 |
2436 if (g1_policy()->in_young_gc_mode()) { | |
2437 assert(check_young_list_well_formed(), | |
2438 "young list should be well formed"); | |
2439 } | |
2440 | |
2441 if (GC_locker::is_active()) { | |
2442 return; // GC is disabled (e.g. JNI GetXXXCritical operation) | |
2443 } | |
2444 | |
2445 bool abandoned = false; | |
2446 { // Call to jvmpi::post_class_unload_events must occur outside of active GC | |
2447 IsGCActiveMark x; | |
2448 | |
2449 gc_prologue(false); | |
2450 increment_total_collections(); | |
2451 | |
2452 #if G1_REM_SET_LOGGING | |
2453 gclog_or_tty->print_cr("\nJust chose CS, heap:"); | |
2454 print(); | |
2455 #endif | |
2456 | |
2457 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
2458 HandleMark hm; // Discard invalid handles created during verification | |
2459 prepare_for_verify(); | |
2460 gclog_or_tty->print(" VerifyBeforeGC:"); | |
2461 Universe::verify(false); | |
2462 } | |
2463 | |
2464 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
2465 | |
2466 // We want to turn off ref discovery, if necessary, and turn it back | |
2467 // on again later if we do. | |
2468 bool was_enabled = ref_processor()->discovery_enabled(); | |
2469 if (was_enabled) ref_processor()->disable_discovery(); | |
2470 | |
2471 // Forget the current alloc region (we might even choose it to be part | |
2472 // of the collection set!). | |
2473 abandon_cur_alloc_region(); | |
2474 | |
2475 // The elapsed time induced by the start time below deliberately elides | |
2476 // the possible verification above. | |
2477 double start_time_sec = os::elapsedTime(); | |
2478 GCOverheadReporter::recordSTWStart(start_time_sec); | |
2479 size_t start_used_bytes = used(); | |
2480 if (!G1ConcMark) { | |
2481 do_sync_mark(); | |
2482 } | |
2483 | |
2484 g1_policy()->record_collection_pause_start(start_time_sec, | |
2485 start_used_bytes); | |
2486 | |
2487 #if SCAN_ONLY_VERBOSE | |
2488 _young_list->print(); | |
2489 #endif // SCAN_ONLY_VERBOSE | |
2490 | |
2491 if (g1_policy()->should_initiate_conc_mark()) { | |
2492 concurrent_mark()->checkpointRootsInitialPre(); | |
2493 } | |
2494 save_marks(); | |
2495 | |
2496 // We must do this before any possible evacuation that should propagate | |
2497 // marks, including evacuation of popular objects in a popular pause. | |
2498 if (mark_in_progress()) { | |
2499 double start_time_sec = os::elapsedTime(); | |
2500 | |
2501 _cm->drainAllSATBBuffers(); | |
2502 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; | |
2503 g1_policy()->record_satb_drain_time(finish_mark_ms); | |
2504 | |
2505 } | |
2506 // Record the number of elements currently on the mark stack, so we | |
2507 // only iterate over these. (Since evacuation may add to the mark | |
2508 // stack, doing more exposes race conditions.) If no mark is in | |
2509 // progress, this will be zero. | |
2510 _cm->set_oops_do_bound(); | |
2511 | |
2512 assert(regions_accounted_for(), "Region leakage."); | |
2513 | |
2514 bool abandoned = false; | |
2515 | |
2516 if (mark_in_progress()) | |
2517 concurrent_mark()->newCSet(); | |
2518 | |
2519 // Now choose the CS. | |
2520 if (popular_region == NULL) { | |
2521 g1_policy()->choose_collection_set(); | |
2522 } else { | |
2523 // We may be evacuating a single region (for popularity). | |
2524 g1_policy()->record_popular_pause_preamble_start(); | |
2525 popularity_pause_preamble(popular_region); | |
2526 g1_policy()->record_popular_pause_preamble_end(); | |
2527 abandoned = (g1_policy()->collection_set() == NULL); | |
2528 // Now we allow more regions to be added (we have to collect | |
2529 // all popular regions). | |
2530 if (!abandoned) { | |
2531 g1_policy()->choose_collection_set(popular_region); | |
2532 } | |
2533 } | |
2534 // We may abandon a pause if we find no region that will fit in the MMU | |
2535 // pause. | |
2536 abandoned = (g1_policy()->collection_set() == NULL); | |
2537 | |
2538 // Nothing to do if we were unable to choose a collection set. | |
2539 if (!abandoned) { | |
2540 #if G1_REM_SET_LOGGING | |
2541 gclog_or_tty->print_cr("\nAfter pause, heap:"); | |
2542 print(); | |
2543 #endif | |
2544 | |
2545 setup_surviving_young_words(); | |
2546 | |
2547 // Set up the gc allocation regions. | |
2548 get_gc_alloc_regions(); | |
2549 | |
2550 // Actually do the work... | |
2551 evacuate_collection_set(); | |
2552 free_collection_set(g1_policy()->collection_set()); | |
2553 g1_policy()->clear_collection_set(); | |
2554 | |
2555 if (popular_region != NULL) { | |
2556 // We have to wait until now, because we don't want the region to | |
2557 // be rescheduled for pop-evac during RS update. | |
2558 popular_region->set_popular_pending(false); | |
2559 } | |
2560 | |
2561 release_gc_alloc_regions(); | |
2562 | |
2563 cleanup_surviving_young_words(); | |
2564 | |
2565 if (g1_policy()->in_young_gc_mode()) { | |
2566 _young_list->reset_sampled_info(); | |
2567 assert(check_young_list_empty(true), | |
2568 "young list should be empty"); | |
2569 | |
2570 #if SCAN_ONLY_VERBOSE | |
2571 _young_list->print(); | |
2572 #endif // SCAN_ONLY_VERBOSE | |
2573 | |
2574 _young_list->reset_auxilary_lists(); | |
2575 } | |
2576 } else { | |
2577 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
2578 } | |
2579 | |
2580 if (evacuation_failed()) { | |
2581 _summary_bytes_used = recalculate_used(); | |
2582 } else { | |
2583 // The "used" of the the collection set have already been subtracted | |
2584 // when they were freed. Add in the bytes evacuated. | |
2585 _summary_bytes_used += g1_policy()->bytes_in_to_space(); | |
2586 } | |
2587 | |
2588 if (g1_policy()->in_young_gc_mode() && | |
2589 g1_policy()->should_initiate_conc_mark()) { | |
2590 concurrent_mark()->checkpointRootsInitialPost(); | |
2591 set_marking_started(); | |
2592 doConcurrentMark(); | |
2593 } | |
2594 | |
2595 #if SCAN_ONLY_VERBOSE | |
2596 _young_list->print(); | |
2597 #endif // SCAN_ONLY_VERBOSE | |
2598 | |
2599 double end_time_sec = os::elapsedTime(); | |
2600 g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0); | |
2601 GCOverheadReporter::recordSTWEnd(end_time_sec); | |
2602 g1_policy()->record_collection_pause_end(popular_region != NULL, | |
2603 abandoned); | |
2604 | |
2605 assert(regions_accounted_for(), "Region leakage."); | |
2606 | |
2607 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { | |
2608 HandleMark hm; // Discard invalid handles created during verification | |
2609 gclog_or_tty->print(" VerifyAfterGC:"); | |
2610 Universe::verify(false); | |
2611 } | |
2612 | |
2613 if (was_enabled) ref_processor()->enable_discovery(); | |
2614 | |
2615 { | |
2616 size_t expand_bytes = g1_policy()->expansion_amount(); | |
2617 if (expand_bytes > 0) { | |
2618 size_t bytes_before = capacity(); | |
2619 expand(expand_bytes); | |
2620 } | |
2621 } | |
2622 | |
2623 if (mark_in_progress()) | |
2624 concurrent_mark()->update_g1_committed(); | |
2625 | |
2626 gc_epilogue(false); | |
2627 } | |
2628 | |
2629 assert(verify_region_lists(), "Bad region lists."); | |
2630 | |
2631 if (reset_should_initiate_conc_mark) | |
2632 g1_policy()->set_should_initiate_conc_mark(); | |
2633 | |
2634 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { | |
2635 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); | |
2636 print_tracing_info(); | |
2637 vm_exit(-1); | |
2638 } | |
2639 } | |
2640 | |
2641 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { | |
2642 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
2643 HeapWord* original_top = NULL; | |
2644 if (r != NULL) | |
2645 original_top = r->top(); | |
2646 | |
2647 // We will want to record the used space in r as being there before gc. | |
2648 // Once we install it as a GC alloc region, it's eligible for allocation. | |
2649 // So record it now and use it later. | |
2650 size_t r_used = 0; | |
2651 if (r != NULL) { | |
2652 r_used = r->used(); | |
2653 | |
2654 if (ParallelGCThreads > 0) { | |
2655 // need to take the lock to guard against two threads calling | |
2656 // get_gc_alloc_region concurrently (very unlikely but...) | |
2657 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2658 r->save_marks(); | |
2659 } | |
2660 } | |
2661 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
2662 _gc_alloc_regions[purpose] = r; | |
2663 if (old_alloc_region != NULL) { | |
2664 // Replace aliases too. | |
2665 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2666 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
2667 _gc_alloc_regions[ap] = r; | |
2668 } | |
2669 } | |
2670 } | |
2671 if (r != NULL) { | |
2672 push_gc_alloc_region(r); | |
2673 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
2674 // We are using a region as a GC alloc region after it has been used | |
2675 // as a mutator allocation region during the current marking cycle. | |
2676 // The mutator-allocated objects are currently implicitly marked, but | |
2677 // when we move hr->next_top_at_mark_start() forward at the end | |
2678 // of the GC pause, they won't be. We therefore mark all objects in | |
2679 // the "gap". We do this object-by-object, since marking densely | |
2680 // does not currently work right with marking bitmap iteration. This | |
2681 // means we rely on TLAB filling at the start of pauses, and no | |
2682 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
2683 // to fix the marking bitmap iteration. | |
2684 HeapWord* curhw = r->next_top_at_mark_start(); | |
2685 HeapWord* t = original_top; | |
2686 | |
2687 while (curhw < t) { | |
2688 oop cur = (oop)curhw; | |
2689 // We'll assume parallel for generality. This is rare code. | |
2690 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
2691 curhw = curhw + cur->size(); | |
2692 } | |
2693 assert(curhw == t, "Should have parsed correctly."); | |
2694 } | |
2695 if (G1PolicyVerbose > 1) { | |
2696 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
2697 "for survivors:", r->bottom(), original_top, r->end()); | |
2698 r->print(); | |
2699 } | |
2700 g1_policy()->record_before_bytes(r_used); | |
2701 } | |
2702 } | |
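A worked example of the gap walk above, with made-up offsets: if next_top_at_mark_start() is at word offset 100 and original_top at word offset 160, and the gap holds three mutator-allocated objects of 20, 30 and 10 words, the loop marks the objects starting at offsets 100, 120 and 150 and terminates exactly at 160, satisfying the assert.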
2703 | |
2704 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
2705 assert(Thread::current()->is_VM_thread() || | |
2706 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); | |
2707 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), | |
2708 "Precondition."); | |
2709 hr->set_is_gc_alloc_region(true); | |
2710 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
2711 _gc_alloc_region_list = hr; | |
2712 } | |
2713 | |
2714 #ifdef G1_DEBUG | |
2715 class FindGCAllocRegion: public HeapRegionClosure { | |
2716 public: | |
2717 bool doHeapRegion(HeapRegion* r) { | |
2718 if (r->is_gc_alloc_region()) { | |
2719 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", | |
2720 r->hrs_index(), r->bottom()); | |
2721 } | |
2722 return false; | |
2723 } | |
2724 }; | |
2725 #endif // G1_DEBUG | |
2726 | |
2727 void G1CollectedHeap::forget_alloc_region_list() { | |
2728 assert(Thread::current()->is_VM_thread(), "Precondition"); | |
2729 while (_gc_alloc_region_list != NULL) { | |
2730 HeapRegion* r = _gc_alloc_region_list; | |
2731 assert(r->is_gc_alloc_region(), "Invariant."); | |
2732 _gc_alloc_region_list = r->next_gc_alloc_region(); | |
2733 r->set_next_gc_alloc_region(NULL); | |
2734 r->set_is_gc_alloc_region(false); | |
2735 if (r->is_empty()) { | |
2736 ++_free_regions; | |
2737 } | |
2738 } | |
2739 #ifdef G1_DEBUG | |
2740 FindGCAllocRegion fa; | |
2741 heap_region_iterate(&fa); | |
2742 #endif // G1_DEBUG | |
2743 } | |
2744 | |
2745 | |
2746 bool G1CollectedHeap::check_gc_alloc_regions() { | |
2747 // TODO: allocation regions check | |
2748 return true; | |
2749 } | |
2750 | |
2751 void G1CollectedHeap::get_gc_alloc_regions() { | |
2752 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2753 // Create new GC alloc regions. | |
2754 HeapRegion* alloc_region = _gc_alloc_regions[ap]; | |
2755 // Clear this alloc region, so that in case it turns out to be | |
2756 // unacceptable, we end up with no allocation region, rather than a bad | |
2757 // one. | |
2758 _gc_alloc_regions[ap] = NULL; | |
2759 if (alloc_region == NULL || alloc_region->in_collection_set()) { | |
2760 // Can't re-use old one. Allocate a new one. | |
2761 alloc_region = newAllocRegionWithExpansion(ap, 0); | |
2762 } | |
2763 if (alloc_region != NULL) { | |
2764 set_gc_alloc_region(ap, alloc_region); | |
2765 } | |
2766 } | |
2767 // Set alternative regions for allocation purposes that have reached | |
2768 // their limit. | |
2769 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2770 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
2771 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
2772 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
2773 } | |
2774 } | |
2775 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
2776 } | |
2777 | |
2778 void G1CollectedHeap::release_gc_alloc_regions() { | |
2779 // We keep a separate list of all regions that have been alloc regions in | |
2780 // the current collection pause. Forget that now. | |
2781 forget_alloc_region_list(); | |
2782 | |
2783 // The current alloc regions contain objs that have survived | |
2784 // collection. Make them no longer GC alloc regions. | |
2785 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2786 HeapRegion* r = _gc_alloc_regions[ap]; | |
2787 if (r != NULL && r->is_empty()) { | |
2788 { | |
2789 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
2790 r->set_zero_fill_complete(); | |
2791 put_free_region_on_list_locked(r); | |
2792 } | |
2793 } | |
2794 // set_gc_alloc_region will also NULLify all aliases to the region | |
2795 set_gc_alloc_region(ap, NULL); | |
2796 _gc_alloc_region_counts[ap] = 0; | |
2797 } | |
2798 } | |
2799 | |
2800 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
2801 _drain_in_progress = false; | |
2802 set_evac_failure_closure(cl); | |
2803 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
2804 } | |
2805 | |
2806 void G1CollectedHeap::finalize_for_evac_failure() { | |
2807 assert(_evac_failure_scan_stack != NULL && | |
2808 _evac_failure_scan_stack->length() == 0, | |
2809 "Postcondition"); | |
2810 assert(!_drain_in_progress, "Postcondition"); | |
2811 // Don't have to delete, since the scan stack is a resource object. | |
2812 _evac_failure_scan_stack = NULL; | |
2813 } | |
2814 | |
2815 | |
2816 | |
2817 // *** Sequential G1 Evacuation | |
2818 | |
2819 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { | |
2820 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
2821 // let the caller handle alloc failure | |
2822 if (alloc_region == NULL) return NULL; | |
2823 assert(isHumongous(word_size) || !alloc_region->isHumongous(), | |
2824 "Either the object is humongous or the region isn't"); | |
2825 HeapWord* block = alloc_region->allocate(word_size); | |
2826 if (block == NULL) { | |
2827 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); | |
2828 } | |
2829 return block; | |
2830 } | |
2831 | |
2832 class G1IsAliveClosure: public BoolObjectClosure { | |
2833 G1CollectedHeap* _g1; | |
2834 public: | |
2835 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
2836 void do_object(oop p) { assert(false, "Do not call."); } | |
2837 bool do_object_b(oop p) { | |
2838 // It is reachable if it is outside the collection set, or is inside | |
2839 // and forwarded. | |
2840 | |
2841 #ifdef G1_DEBUG | |
2842 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
2843 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
2844 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
2845 #endif // G1_DEBUG | |
2846 | |
2847 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
2848 } | |
2849 }; | |
2850 | |
2851 class G1KeepAliveClosure: public OopClosure { | |
2852 G1CollectedHeap* _g1; | |
2853 public: | |
2854 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
2855 void do_oop(narrowOop* p) { | |
2856 guarantee(false, "NYI"); | |
2857 } | |
2858 void do_oop(oop* p) { | |
2859 oop obj = *p; | |
2860 #ifdef G1_DEBUG | |
2861 if (PrintGC && Verbose) { | |
2862 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
2863 p, (void*) obj, (void*) *p); | |
2864 } | |
2865 #endif // G1_DEBUG | |
2866 | |
2867 if (_g1->obj_in_cs(obj)) { | |
2868 assert( obj->is_forwarded(), "invariant" ); | |
2869 *p = obj->forwardee(); | |
2870 | |
2871 #ifdef G1_DEBUG | |
2872 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
2873 (void*) obj, (void*) *p); | |
2874 #endif // G1_DEBUG | |
2875 } | |
2876 } | |
2877 }; | |
2878 | |
2879 class RecreateRSetEntriesClosure: public OopClosure { | |
2880 private: | |
2881 G1CollectedHeap* _g1; | |
2882 G1RemSet* _g1_rem_set; | |
2883 HeapRegion* _from; | |
2884 public: | |
2885 RecreateRSetEntriesClosure(G1CollectedHeap* g1, HeapRegion* from) : | |
2886 _g1(g1), _g1_rem_set(g1->g1_rem_set()), _from(from) | |
2887 {} | |
2888 | |
2889 void do_oop(narrowOop* p) { | |
2890 guarantee(false, "NYI"); | |
2891 } | |
2892 void do_oop(oop* p) { | |
2893 assert(_from->is_in_reserved(p), "paranoia"); | |
2894 if (*p != NULL) { | |
2895 _g1_rem_set->write_ref(_from, p); | |
2896 } | |
2897 } | |
2898 }; | |
2899 | |
2900 class RemoveSelfPointerClosure: public ObjectClosure { | |
2901 private: | |
2902 G1CollectedHeap* _g1; | |
2903 ConcurrentMark* _cm; | |
2904 HeapRegion* _hr; | |
2905 size_t _prev_marked_bytes; | |
2906 size_t _next_marked_bytes; | |
2907 public: | |
2908 RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr) : | |
2909 _g1(g1), _cm(_g1->concurrent_mark()), _hr(hr), | |
2910 _prev_marked_bytes(0), _next_marked_bytes(0) | |
2911 {} | |
2912 | |
2913 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
2914 size_t next_marked_bytes() { return _next_marked_bytes; } | |
2915 | |
352 | 2916 // The original idea here was to coalesce evacuated and dead objects. |
2917 // However, that caused complications with the block offset table (BOT). | |
2918 // In particular if there were two TLABs, one of them partially refined. | |
2919 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| | |
2920 // The BOT entries of the unrefined part of TLAB_2 point to the start | |
2921 // of TLAB_2. If the last object of TLAB_1 and the first object | |
2922 // of TLAB_2 are coalesced, then the cards of the unrefined part | |
2923 // would point into the middle of the filler object. | |
2924 // | |
2925 // The current approach is to not coalesce and leave the BOT contents intact. | |
2926 void do_object(oop obj) { | |
2927 if (obj->is_forwarded() && obj->forwardee() == obj) { | |
2928 // The object failed to move. | |
2929 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); | |
2930 _cm->markPrev(obj); | |
2931 assert(_cm->isPrevMarked(obj), "Should be marked!"); | |
2932 _prev_marked_bytes += (obj->size() * HeapWordSize); | |
2933 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { | |
2934 _cm->markAndGrayObjectIfNecessary(obj); | |
2935 } | |
2936 obj->set_mark(markOopDesc::prototype()); | |
2937 // While we were processing RSet buffers during the | |
2938 // collection, we actually didn't scan any cards on the | |
2939 // collection set, since we didn't want to update remembered | |
2940 // sets with entries that point into the collection set, given | |
2941 // that live objects from the collection set are about to move | |
2942 // and such entries will be stale very soon. This change also | |
2943 // dealt with a reliability issue which involved scanning a | |
2944 // card in the collection set and coming across an array that | |
2945 // was being chunked and looking malformed. The problem is | |
2946 // that, if evacuation fails, we might have remembered set | |
2947 // entries missing given that we skipped cards on the | |
2948 // collection set. So, we'll recreate such entries now. | |
2949 RecreateRSetEntriesClosure cl(_g1, _hr); | |
2950 obj->oop_iterate(&cl); | |
2951 assert(_cm->isPrevMarked(obj), "Should be marked!"); | |
2952 } else { | |
2953 // The object has been either evacuated or is dead. Fill it with a | |
2954 // dummy object. | |
2955 MemRegion mr((HeapWord*)obj, obj->size()); | |
342 | 2956 SharedHeap::fill_region_with_object(mr); |
2957 _cm->clearRangeBothMaps(mr); | |
2958 } | |
2959 } | |
2960 }; | |
2961 | |
2962 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
2963 HeapRegion* cur = g1_policy()->collection_set(); | |
2964 | |
2965 while (cur != NULL) { | |
2966 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
2967 | |
2968 if (cur->evacuation_failed()) { | |
2969 RemoveSelfPointerClosure rspc(_g1h, cur); | |
2970 assert(cur->in_collection_set(), "bad CS"); | |
2971 cur->object_iterate(&rspc); | |
2972 | |
2973 // A number of manipulations to make the TAMS be the current top, | |
2974 // and the marked bytes be the ones observed in the iteration. | |
2975 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
2976 // The comments below are the postconditions achieved by the | |
2977 // calls. Note especially the last such condition, which says that | |
2978 // the count of marked bytes has been properly restored. | |
2979 cur->note_start_of_marking(false); | |
2980 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
2981 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
2982 // _next_marked_bytes == prev_marked_bytes. | |
2983 cur->note_end_of_marking(); | |
2984 // _prev_top_at_mark_start == top(), | |
2985 // _prev_marked_bytes == prev_marked_bytes | |
2986 } | |
2987 // If there is no mark in progress, we modified the _next variables | |
2988 // above needlessly, but harmlessly. | |
2989 if (_g1h->mark_in_progress()) { | |
2990 cur->note_start_of_marking(false); | |
2991 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
2992 // _next_marked_bytes == next_marked_bytes. | |
2993 } | |
2994 | |
2995 // Now make sure the region has the right index in the sorted array. | |
2996 g1_policy()->note_change_in_marked_bytes(cur); | |
2997 } | |
2998 cur = cur->next_in_collection_set(); | |
2999 } | |
3000 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3001 | |
3002 // Now restore saved marks, if any. | |
3003 if (_objs_with_preserved_marks != NULL) { | |
3004 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3005 assert(_objs_with_preserved_marks->length() == | |
3006 _preserved_marks_of_objs->length(), "Both or none."); | |
3007 guarantee(_objs_with_preserved_marks->length() == | |
3008 _preserved_marks_of_objs->length(), "Both or none."); | |
3009 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3010 oop obj = _objs_with_preserved_marks->at(i); | |
3011 markOop m = _preserved_marks_of_objs->at(i); | |
3012 obj->set_mark(m); | |
3013 } | |
3014 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3015 delete _objs_with_preserved_marks; | |
3016 delete _preserved_marks_of_objs; | |
3017 _objs_with_preserved_marks = NULL; | |
3018 _preserved_marks_of_objs = NULL; | |
3019 } | |
3020 } | |
3021 | |
3022 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
3023 _evac_failure_scan_stack->push(obj); | |
3024 } | |
3025 | |
3026 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3027 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3028 | |
3029 while (_evac_failure_scan_stack->length() > 0) { | |
3030 oop obj = _evac_failure_scan_stack->pop(); | |
3031 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3032 obj->oop_iterate_backwards(_evac_failure_closure); | |
3033 } | |
3034 } | |
3035 | |
3036 void G1CollectedHeap::handle_evacuation_failure(oop old) { | |
3037 markOop m = old->mark(); | |
3038 // forward to self | |
3039 assert(!old->is_forwarded(), "precondition"); | |
3040 | |
3041 old->forward_to(old); | |
3042 handle_evacuation_failure_common(old, m); | |
3043 } | |
3044 | |
3045 oop | |
3046 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
3047 oop old) { | |
3048 markOop m = old->mark(); | |
3049 oop forward_ptr = old->forward_to_atomic(old); | |
3050 if (forward_ptr == NULL) { | |
3051 // Forward-to-self succeeded. | |
3052 if (_evac_failure_closure != cl) { | |
3053 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
3054 assert(!_drain_in_progress, | |
3055 "Should only be true while someone holds the lock."); | |
3056 // Set the global evac-failure closure to the current thread's. | |
3057 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
3058 set_evac_failure_closure(cl); | |
3059 // Now do the common part. | |
3060 handle_evacuation_failure_common(old, m); | |
3061 // Reset to NULL. | |
3062 set_evac_failure_closure(NULL); | |
3063 } else { | |
3064 // The lock is already held, and this is recursive. | |
3065 assert(_drain_in_progress, "This should only be the recursive case."); | |
3066 handle_evacuation_failure_common(old, m); | |
3067 } | |
3068 return old; | |
3069 } else { | |
3070 // Someone else had a place to copy it. | |
3071 return forward_ptr; | |
3072 } | |
3073 } | |
3074 | |
3075 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
3076 set_evacuation_failed(true); | |
3077 | |
3078 preserve_mark_if_necessary(old, m); | |
3079 | |
3080 HeapRegion* r = heap_region_containing(old); | |
3081 if (!r->evacuation_failed()) { | |
3082 r->set_evacuation_failed(true); | |
3083 if (G1TraceRegions) { | |
3084 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " | |
3085 "["PTR_FORMAT","PTR_FORMAT")\n", | |
3086 r, r->bottom(), r->end()); | |
3087 } | |
3088 } | |
3089 | |
3090 push_on_evac_failure_scan_stack(old); | |
3091 | |
3092 if (!_drain_in_progress) { | |
3093 // prevent recursion in copy_to_survivor_space() | |
3094 _drain_in_progress = true; | |
3095 drain_evac_failure_scan_stack(); | |
3096 _drain_in_progress = false; | |
3097 } | |
3098 } | |
3099 | |
3100 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
3101 if (m != markOopDesc::prototype()) { | |
3102 if (_objs_with_preserved_marks == NULL) { | |
3103 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
3104 _objs_with_preserved_marks = | |
3105 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3106 _preserved_marks_of_objs = | |
3107 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
3108 } | |
3109 _objs_with_preserved_marks->push(obj); | |
3110 _preserved_marks_of_objs->push(m); | |
3111 } | |
3112 } | |
3113 | |
3114 // *** Parallel G1 Evacuation | |
3115 | |
3116 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
3117 size_t word_size) { | |
3118 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3119 // let the caller handle alloc failure | |
3120 if (alloc_region == NULL) return NULL; | |
3121 | |
3122 HeapWord* block = alloc_region->par_allocate(word_size); | |
3123 if (block == NULL) { | |
3124 MutexLockerEx x(par_alloc_during_gc_lock(), | |
3125 Mutex::_no_safepoint_check_flag); | |
3126 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
3127 } | |
3128 return block; | |
3129 } | |
3130 | |
3131 HeapWord* | |
3132 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
3133 HeapRegion* alloc_region, | |
3134 bool par, | |
3135 size_t word_size) { | |
3136 HeapWord* block = NULL; | |
3137 // In the parallel case, a thread that previously obtained the lock may have | |
3138 // already assigned a new gc_alloc_region. | |
3139 if (alloc_region != _gc_alloc_regions[purpose]) { | |
3140 assert(par, "But should only happen in parallel case."); | |
3141 alloc_region = _gc_alloc_regions[purpose]; | |
3142 if (alloc_region == NULL) return NULL; | |
3143 block = alloc_region->par_allocate(word_size); | |
3144 if (block != NULL) return block; | |
3145 // Otherwise, continue; this new region is empty, too. | |
3146 } | |
3147 assert(alloc_region != NULL, "We better have an allocation region"); | |
3148 // Another thread might have obtained alloc_region for the given | |
3149 // purpose, and might be attempting to allocate in it, and might | |
3150 // succeed. Therefore, we can't do the "finalization" stuff on the | |
3151 // region below until we're sure the last allocation has happened. | |
3152 // We ensure this by allocating the remaining space with a garbage | |
3153 // object. | |
3154 if (par) par_allocate_remaining_space(alloc_region); | |
3155 // Now we can do the post-GC stuff on the region. | |
3156 alloc_region->note_end_of_copying(); | |
3157 g1_policy()->record_after_bytes(alloc_region->used()); | |
3158 | |
3159 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
3160 // Cannot allocate more regions for the given purpose. | |
3161 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
3162 // Is there an alternative? | |
3163 if (purpose != alt_purpose) { | |
3164 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
3165 // Has the alternative region not already been aliased? | |
3166 if (alloc_region != alt_region) { | |
3167 // Try to allocate in the alternative region. | |
3168 if (par) { | |
3169 block = alt_region->par_allocate(word_size); | |
3170 } else { | |
3171 block = alt_region->allocate(word_size); | |
3172 } | |
3173 // Make an alias. | |
3174 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
3175 } | |
3176 if (block != NULL) { | |
3177 return block; | |
3178 } | |
3179 // Both the allocation region and the alternative one are full | |
3180 // and aliased; replace them with a new allocation region. | |
3181 purpose = alt_purpose; | |
3182 } else { | |
3183 set_gc_alloc_region(purpose, NULL); | |
3184 return NULL; | |
3185 } | |
3186 } | |
3187 | |
3188 // Now allocate a new region for allocation. | |
3189 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); | |
3190 | |
3191 // let the caller handle alloc failure | |
3192 if (alloc_region != NULL) { | |
3193 | |
3194 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3195 assert(alloc_region->saved_mark_at_top(), | |
3196 "Mark should have been saved already."); | |
3197 // We used to assert that the region was zero-filled here, but no | |
3198 // longer. | |
3199 | |
3200 // This must be done last: once it's installed, other threads may | |
3201 // allocate in it (without holding the lock). | |
3202 set_gc_alloc_region(purpose, alloc_region); | |
3203 | |
3204 if (par) { | |
3205 block = alloc_region->par_allocate(word_size); | |
3206 } else { | |
3207 block = alloc_region->allocate(word_size); | |
3208 } | |
3209 // Caller handles alloc failure. | |
3210 } else { | |
3211 // This also sets other purposes that were using the same old alloc region to NULL. | |
3212 set_gc_alloc_region(purpose, NULL); | |
3213 } | |
3214 return block; // May be NULL. | |
3215 } | |
3216 | |
3217 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
3218 HeapWord* block = NULL; | |
3219 size_t free_words; | |
3220 do { | |
3221 free_words = r->free()/HeapWordSize; | |
3222 // If there's too little space, no one can allocate, so we're done. | |
3223 if (free_words < (size_t)oopDesc::header_size()) return; | |
3224 // Otherwise, try to claim it. | |
3225 block = r->par_allocate(free_words); | |
3226 } while (block == NULL); | |
3227 SharedHeap::fill_region_with_object(MemRegion(block, free_words)); | |
3228 } | |
3229 | |
3230 #define use_local_bitmaps 1 | |
3231 #define verify_local_bitmaps 0 | |
3232 | |
3233 #ifndef PRODUCT | |
3234 | |
3235 class GCLabBitMap; | |
3236 class GCLabBitMapClosure: public BitMapClosure { | |
3237 private: | |
3238 ConcurrentMark* _cm; | |
3239 GCLabBitMap* _bitmap; | |
3240 | |
3241 public: | |
3242 GCLabBitMapClosure(ConcurrentMark* cm, | |
3243 GCLabBitMap* bitmap) { | |
3244 _cm = cm; | |
3245 _bitmap = bitmap; | |
3246 } | |
3247 | |
3248 virtual bool do_bit(size_t offset); | |
3249 }; | |
3250 | |
3251 #endif // PRODUCT | |
3252 | |
3253 #define oop_buffer_length 256 | |
3254 | |
3255 class GCLabBitMap: public BitMap { | |
3256 private: | |
3257 ConcurrentMark* _cm; | |
3258 | |
3259 int _shifter; | |
3260 size_t _bitmap_word_covers_words; | |
3261 | |
3262 // beginning of the heap | |
3263 HeapWord* _heap_start; | |
3264 | |
3265 // this is the actual start of the GCLab | |
3266 HeapWord* _real_start_word; | |
3267 | |
3268 // this is the actual end of the GCLab | |
3269 HeapWord* _real_end_word; | |
3270 | |
3271 // this is the first word, possibly located before the actual start | |
3272 // of the GCLab, that corresponds to the first bit of the bitmap | |
3273 HeapWord* _start_word; | |
3274 | |
3275 // size of a GCLab in words | |
3276 size_t _gclab_word_size; | |
3277 | |
3278 static int shifter() { | |
3279 return MinObjAlignment - 1; | |
3280 } | |
3281 | |
3282 // how many heap words does a single bitmap word correspond to? | |
3283 static size_t bitmap_word_covers_words() { | |
3284 return BitsPerWord << shifter(); | |
3285 } | |
3286 | |
3287 static size_t gclab_word_size() { | |
3288 return ParallelGCG1AllocBufferSize / HeapWordSize; | |
3289 } | |
3290 | |
3291 static size_t bitmap_size_in_bits() { | |
3292 size_t bits_in_bitmap = gclab_word_size() >> shifter(); | |
3293 // We are going to ensure that the beginning of a word in this | |
3294 // bitmap also corresponds to the beginning of a word in the | |
3295 // global marking bitmap. To handle the case where a GCLab | |
3296 // starts from the middle of the bitmap, we need to add enough | |
3297 // space (i.e. up to a bitmap word) to ensure that we have | |
3298 // enough bits in the bitmap. | |
3299 return bits_in_bitmap + BitsPerWord - 1; | |
3300 } | |
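// Worked example of the sizing above (hypothetical values, not taken from
// this file): with a ParallelGCG1AllocBufferSize of 16K bytes and an
// 8-byte HeapWordSize, gclab_word_size() is 2048 words; assuming
// MinObjAlignment == 1 the shifter is 0, so bits_in_bitmap == 2048, and on
// a 64-bit platform the extra BitsPerWord - 1 == 63 slack bits make
// bitmap_size_in_bits() == 2111, which bitmap_size_in_words() rounds up
// to 33 bitmap words.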
3301 public: | |
3302 GCLabBitMap(HeapWord* heap_start) | |
3303 : BitMap(bitmap_size_in_bits()), | |
3304 _cm(G1CollectedHeap::heap()->concurrent_mark()), | |
3305 _shifter(shifter()), | |
3306 _bitmap_word_covers_words(bitmap_word_covers_words()), | |
3307 _heap_start(heap_start), | |
3308 _gclab_word_size(gclab_word_size()), | |
3309 _real_start_word(NULL), | |
3310 _real_end_word(NULL), | |
3311 _start_word(NULL) | |
3312 { | |
3313 guarantee( size_in_words() >= bitmap_size_in_words(), | |
3314 "just making sure"); | |
3315 } | |
3316 | |
3317 inline unsigned heapWordToOffset(HeapWord* addr) { | |
3318 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter; | |
3319 assert(offset < size(), "offset should be within bounds"); | |
3320 return offset; | |
3321 } | |
3322 | |
3323 inline HeapWord* offsetToHeapWord(size_t offset) { | |
3324 HeapWord* addr = _start_word + (offset << _shifter); | |
3325 assert(_real_start_word <= addr && addr < _real_end_word, "invariant"); | |
3326 return addr; | |
3327 } | |
3328 | |
3329 bool fields_well_formed() { | |
3330 bool ret1 = (_real_start_word == NULL) && | |
3331 (_real_end_word == NULL) && | |
3332 (_start_word == NULL); | |
3333 if (ret1) | |
3334 return true; | |
3335 | |
3336 bool ret2 = _real_start_word >= _start_word && | |
3337 _start_word < _real_end_word && | |
3338 (_real_start_word + _gclab_word_size) == _real_end_word && | |
3339 (_start_word + _gclab_word_size + _bitmap_word_covers_words) | |
3340 > _real_end_word; | |
3341 return ret2; | |
3342 } | |
3343 | |
3344 inline bool mark(HeapWord* addr) { | |
3345 guarantee(use_local_bitmaps, "invariant"); | |
3346 assert(fields_well_formed(), "invariant"); | |
3347 | |
3348 if (addr >= _real_start_word && addr < _real_end_word) { | |
3349 assert(!isMarked(addr), "should not have already been marked"); | |
3350 | |
3351 // first mark it on the bitmap | |
3352 at_put(heapWordToOffset(addr), true); | |
3353 | |
3354 return true; | |
3355 } else { | |
3356 return false; | |
3357 } | |
3358 } | |
3359 | |
3360 inline bool isMarked(HeapWord* addr) { | |
3361 guarantee(use_local_bitmaps, "invariant"); | |
3362 assert(fields_well_formed(), "invariant"); | |
3363 | |
3364 return at(heapWordToOffset(addr)); | |
3365 } | |
3366 | |
3367 void set_buffer(HeapWord* start) { | |
3368 guarantee(use_local_bitmaps, "invariant"); | |
3369 clear(); | |
3370 | |
3371 assert(start != NULL, "invariant"); | |
3372 _real_start_word = start; | |
3373 _real_end_word = start + _gclab_word_size; | |
3374 | |
3375 size_t diff = | |
3376 pointer_delta(start, _heap_start) % _bitmap_word_covers_words; | |
3377 _start_word = start - diff; | |
3378 | |
3379 assert(fields_well_formed(), "invariant"); | |
3380 } | |
3381 | |
3382 #ifndef PRODUCT | |
3383 void verify() { | |
3384 // verify that the marks have been propagated | |
3385 GCLabBitMapClosure cl(_cm, this); | |
3386 iterate(&cl); | |
3387 } | |
3388 #endif // PRODUCT | |
3389 | |
3390 void retire() { | |
3391 guarantee(use_local_bitmaps, "invariant"); | |
3392 assert(fields_well_formed(), "invariant"); | |
3393 | |
3394 if (_start_word != NULL) { | |
3395 CMBitMap* mark_bitmap = _cm->nextMarkBitMap(); | |
3396 | |
3397 // this means that the bitmap was set up for the GCLab | |
3398 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant"); | |
3399 | |
3400 mark_bitmap->mostly_disjoint_range_union(this, | |
3401 0, // always start from the start of the bitmap | |
3402 _start_word, | |
3403 size_in_words()); | |
3404 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); | |
3405 | |
3406 #ifndef PRODUCT | |
3407 if (use_local_bitmaps && verify_local_bitmaps) | |
3408 verify(); | |
3409 #endif // PRODUCT | |
3410 } else { | |
3411 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant"); | |
3412 } | |
3413 } | |
3414 | |
3415 static size_t bitmap_size_in_words() { | |
3416 return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord; | |
3417 } | |
3418 }; | |
3419 | |
3420 #ifndef PRODUCT | |
3421 | |
3422 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
3423 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
3424 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
3425 return true; | |
3426 } | |
3427 | |
3428 #endif // PRODUCT | |
3429 | |
3430 class G1ParGCAllocBuffer: public ParGCAllocBuffer { | |
3431 private: | |
3432 bool _retired; | |
3433 bool _during_marking; | |
3434 GCLabBitMap _bitmap; | |
3435 | |
3436 public: | |
3437 G1ParGCAllocBuffer() : | |
3438 ParGCAllocBuffer(ParallelGCG1AllocBufferSize / HeapWordSize), | |
3439 _during_marking(G1CollectedHeap::heap()->mark_in_progress()), | |
3440 _bitmap(G1CollectedHeap::heap()->reserved_region().start()), | |
3441 _retired(false) | |
3442 { } | |
3443 | |
3444 inline bool mark(HeapWord* addr) { | |
3445 guarantee(use_local_bitmaps, "invariant"); | |
3446 assert(_during_marking, "invariant"); | |
3447 return _bitmap.mark(addr); | |
3448 } | |
3449 | |
3450 inline void set_buf(HeapWord* buf) { | |
3451 if (use_local_bitmaps && _during_marking) | |
3452 _bitmap.set_buffer(buf); | |
3453 ParGCAllocBuffer::set_buf(buf); | |
3454 _retired = false; | |
3455 } | |
3456 | |
3457 inline void retire(bool end_of_gc, bool retain) { | |
3458 if (_retired) | |
3459 return; | |
3460 if (use_local_bitmaps && _during_marking) { | |
3461 _bitmap.retire(); | |
3462 } | |
3463 ParGCAllocBuffer::retire(end_of_gc, retain); | |
3464 _retired = true; | |
3465 } | |
3466 }; | |
3467 | |
3468 | |
3469 class G1ParScanThreadState : public StackObj { | |
3470 protected: | |
3471 G1CollectedHeap* _g1h; | |
3472 RefToScanQueue* _refs; | |
3473 | |
3474 typedef GrowableArray<oop*> OverflowQueue; | |
3475 OverflowQueue* _overflowed_refs; | |
3476 | |
3477 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount]; | |
3478 | |
3479 size_t _alloc_buffer_waste; | |
3480 size_t _undo_waste; | |
3481 | |
3482 OopsInHeapRegionClosure* _evac_failure_cl; | |
3483 G1ParScanHeapEvacClosure* _evac_cl; | |
3484 G1ParScanPartialArrayClosure* _partial_scan_cl; | |
3485 | |
3486 int _hash_seed; | |
3487 int _queue_num; | |
3488 | |
3489 int _term_attempts; | |
3490 #if G1_DETAILED_STATS | |
3491 int _pushes, _pops, _steals, _steal_attempts; | |
3492 int _overflow_pushes; | |
3493 #endif | |
3494 | |
3495 double _start; | |
3496 double _start_strong_roots; | |
3497 double _strong_roots_time; | |
3498 double _start_term; | |
3499 double _term_time; | |
3500 | |
3501 // Map from young-age-index (0 == not young, 1 is youngest) to | |
3502 // surviving words. base is what we get back from the malloc call | |
3503 size_t* _surviving_young_words_base; | |
3504 // this points into the array, as we use the first few entries for padding | |
3505 size_t* _surviving_young_words; | |
3506 | |
3507 #define PADDING_ELEM_NUM (64 / sizeof(size_t)) | |
3508 | |
3509 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } | |
3510 | |
3511 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } | |
3512 | |
3513 public: | |
3514 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) | |
3515 : _g1h(g1h), | |
3516 _refs(g1h->task_queue(queue_num)), | |
3517 _hash_seed(17), _queue_num(queue_num), | |
3518 _term_attempts(0), | |
3519 #if G1_DETAILED_STATS | |
3520 _pushes(0), _pops(0), _steals(0), | |
3521 _steal_attempts(0), _overflow_pushes(0), | |
3522 #endif | |
3523 _strong_roots_time(0), _term_time(0), | |
3524 _alloc_buffer_waste(0), _undo_waste(0) | |
3525 { | |
3526 // we allocate G1YoungSurvRateNumRegions plus one entries, since | |
3527 // we "sacrifice" entry 0 to keep track of surviving bytes for | |
3528 // non-young regions (where the age is -1) | |
3529 // We also add a few elements at the beginning and at the end in | |
3530 // an attempt to eliminate cache contention | |
3531 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); | |
3532 size_t array_length = PADDING_ELEM_NUM + | |
3533 real_length + | |
3534 PADDING_ELEM_NUM; | |
3535 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3536 if (_surviving_young_words_base == NULL) | |
3537 vm_exit_out_of_memory(array_length * sizeof(size_t), | |
3538 "Not enough space for young surv histo."); | |
3539 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; | |
3540 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); | |
3541 | |
3542 _overflowed_refs = new OverflowQueue(10); | |
3543 | |
3544 _start = os::elapsedTime(); | |
3545 } | |
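// Illustrative layout of the surviving-young-words array built above
// (assuming 8-byte size_t, so PADDING_ELEM_NUM == 8), for a young cset of
// length N:
//   [ 8 pad entries | entry 0 (non-young, age -1) | entries 1..N (young) | 8 pad entries ]
// _surviving_young_words_base points at the first pad entry, while
// _surviving_young_words points at entry 0.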
3546 | |
3547 ~G1ParScanThreadState() { | |
3548 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); | |
3549 } | |
3550 | |
3551 RefToScanQueue* refs() { return _refs; } | |
3552 OverflowQueue* overflowed_refs() { return _overflowed_refs; } | |
3553 | |
3554 inline G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { | |
3555 return &_alloc_buffers[purpose]; | |
3556 } | |
3557 | |
3558 size_t alloc_buffer_waste() { return _alloc_buffer_waste; } | |
3559 size_t undo_waste() { return _undo_waste; } | |
3560 | |
3561 void push_on_queue(oop* ref) { | |
3562 if (!refs()->push(ref)) { | |
3563 overflowed_refs()->push(ref); | |
3564 IF_G1_DETAILED_STATS(note_overflow_push()); | |
3565 } else { | |
3566 IF_G1_DETAILED_STATS(note_push()); | |
3567 } | |
3568 } | |
3569 | |
3570 void pop_from_queue(oop*& ref) { | |
3571 if (!refs()->pop_local(ref)) { | |
3572 ref = NULL; | |
3573 } else { | |
3574 IF_G1_DETAILED_STATS(note_pop()); | |
3575 } | |
3576 } | |
3577 | |
3578 void pop_from_overflow_queue(oop*& ref) { | |
3579 ref = overflowed_refs()->pop(); | |
3580 } | |
3581 | |
3582 int refs_to_scan() { return refs()->size(); } | |
3583 int overflowed_refs_to_scan() { return overflowed_refs()->length(); } | |
3584 | |
3585 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { | |
3586 | |
3587 HeapWord* obj = NULL; | |
3588 if (word_sz * 100 < | |
3589 (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) * | |
3590 ParallelGCBufferWastePct) { | |
3591 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); | |
3592 add_to_alloc_buffer_waste(alloc_buf->words_remaining()); | |
3593 alloc_buf->retire(false, false); | |
3594 | |
3595 HeapWord* buf = | |
3596 _g1h->par_allocate_during_gc(purpose, ParallelGCG1AllocBufferSize / HeapWordSize); | |
3597 if (buf == NULL) return NULL; // Let caller handle allocation failure. | |
3598 // Otherwise. | |
3599 alloc_buf->set_buf(buf); | |
3600 | |
3601 obj = alloc_buf->allocate(word_sz); | |
3602 assert(obj != NULL, "buffer was definitely big enough..."); | |
3603 } | |
3604 else { | |
3605 obj = _g1h->par_allocate_during_gc(purpose, word_sz); | |
3606 } | |
3607 return obj; | |
3608 } | |
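// Worked example of the buffer-vs-direct decision above (hypothetical
// values, not from this file): with a 2048-word allocation buffer and
// ParallelGCBufferWastePct == 10, the test word_sz * 100 < 2048 * 10
// routes objects smaller than ~205 words through a freshly refilled
// buffer (wasting at most the old buffer's remaining space), while larger
// objects are allocated directly with par_allocate_during_gc().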
3609 | |
3610 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { | |
3611 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); | |
3612 if (obj != NULL) return obj; | |
3613 return allocate_slow(purpose, word_sz); | |
3614 } | |
3615 | |
3616 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { | |
3617 if (alloc_buffer(purpose)->contains(obj)) { | |
3618 guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1), | |
3619 "should contain whole object"); | |
3620 alloc_buffer(purpose)->undo_allocation(obj, word_sz); | |
3621 } | |
3622 else { | |
3623 SharedHeap::fill_region_with_object(MemRegion(obj, word_sz)); | |
3624 add_to_undo_waste(word_sz); | |
3625 } | |
3626 } | |
3627 | |
3628 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { | |
3629 _evac_failure_cl = evac_failure_cl; | |
3630 } | |
3631 OopsInHeapRegionClosure* evac_failure_closure() { | |
3632 return _evac_failure_cl; | |
3633 } | |
3634 | |
3635 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) { | |
3636 _evac_cl = evac_cl; | |
3637 } | |
3638 | |
3639 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) { | |
3640 _partial_scan_cl = partial_scan_cl; | |
3641 } | |
3642 | |
3643 int* hash_seed() { return &_hash_seed; } | |
3644 int queue_num() { return _queue_num; } | |
3645 | |
3646 int term_attempts() { return _term_attempts; } | |
3647 void note_term_attempt() { _term_attempts++; } | |
3648 | |
3649 #if G1_DETAILED_STATS | |
3650 int pushes() { return _pushes; } | |
3651 int pops() { return _pops; } | |
3652 int steals() { return _steals; } | |
3653 int steal_attempts() { return _steal_attempts; } | |
3654 int overflow_pushes() { return _overflow_pushes; } | |
3655 | |
3656 void note_push() { _pushes++; } | |
3657 void note_pop() { _pops++; } | |
3658 void note_steal() { _steals++; } | |
3659 void note_steal_attempt() { _steal_attempts++; } | |
3660 void note_overflow_push() { _overflow_pushes++; } | |
3661 #endif | |
3662 | |
3663 void start_strong_roots() { | |
3664 _start_strong_roots = os::elapsedTime(); | |
3665 } | |
3666 void end_strong_roots() { | |
3667 _strong_roots_time += (os::elapsedTime() - _start_strong_roots); | |
3668 } | |
3669 double strong_roots_time() { return _strong_roots_time; } | |
3670 | |
3671 void start_term_time() { | |
3672 note_term_attempt(); | |
3673 _start_term = os::elapsedTime(); | |
3674 } | |
3675 void end_term_time() { | |
3676 _term_time += (os::elapsedTime() - _start_term); | |
3677 } | |
3678 double term_time() { return _term_time; } | |
3679 | |
3680 double elapsed() { | |
3681 return os::elapsedTime() - _start; | |
3682 } | |
3683 | |
3684 size_t* surviving_young_words() { | |
3685 // We add on to hide entry 0 which accumulates surviving words for | |
3686 // age -1 regions (i.e. non-young ones) | |
3687 return _surviving_young_words; | |
3688 } | |
3689 | |
3690 void retire_alloc_buffers() { | |
3691 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3692 size_t waste = _alloc_buffers[ap].words_remaining(); | |
3693 add_to_alloc_buffer_waste(waste); | |
3694 _alloc_buffers[ap].retire(true, false); | |
3695 } | |
3696 } | |
3697 | |
3698 void trim_queue() { | |
3699 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) { | |
3700 oop *ref_to_scan = NULL; | |
3701 if (overflowed_refs_to_scan() == 0) { | |
3702 pop_from_queue(ref_to_scan); | |
3703 } else { | |
3704 pop_from_overflow_queue(ref_to_scan); | |
3705 } | |
3706 if (ref_to_scan != NULL) { | |
3707 if ((intptr_t)ref_to_scan & G1_PARTIAL_ARRAY_MASK) { | |
3708 _partial_scan_cl->do_oop_nv(ref_to_scan); | |
3709 } else { | |
3710 // Note: we can use "raw" versions of "region_containing" because | |
3711 // "obj_to_scan" is definitely in the heap, and is not in a | |
3712 // humongous region. | |
3713 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); | |
3714 _evac_cl->set_region(r); | |
3715 _evac_cl->do_oop_nv(ref_to_scan); | |
3716 } | |
3717 } | |
3718 } | |
3719 } | |
3720 }; | |
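// A note on the queue entries drained by trim_queue() above: most entries
// are plain oop* locations to scan, but a partially scanned object array
// is pushed as the (old) array oop itself with G1_PARTIAL_ARRAY_MASK or-ed
// in (see copy_to_survivor_space() and
// G1ParScanPartialArrayClosure::do_oop_nv() below). The mask occupies
// alignment bits that a genuine heap reference never has set, so testing
// it distinguishes the two cases and "& ~G1_PARTIAL_ARRAY_MASK" recovers
// the original pointer.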
3721 | |
3722 | |
3723 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : | |
3724 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
3725 _par_scan_state(par_scan_state) { } | |
3726 | |
3727 // This closure is applied to the fields of the objects that have just been copied. | |
3728 // Should probably be made inline and moved to g1OopClosures.inline.hpp. | |
3729 void G1ParScanClosure::do_oop_nv(oop* p) { | |
3730 oop obj = *p; | |
3731 if (obj != NULL) { | |
3732 if (_g1->obj_in_cs(obj)) { | |
3733 if (obj->is_forwarded()) { | |
3734 *p = obj->forwardee(); | |
3735 } else { | |
3736 _par_scan_state->push_on_queue(p); | |
3737 return; | |
3738 } | |
3739 } | |
3740 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); | |
3741 } | |
3742 } | |
3743 | |
3744 void G1ParCopyHelper::mark_forwardee(oop* p) { | |
3745 // This is called _after_ do_oop_work has been called, hence after | |
3746 // the object has been relocated to its new location and *p points | |
3747 // to its new location. | |
3748 | |
3749 oop thisOop = *p; | |
3750 if (thisOop != NULL) { | |
3751 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)), | |
3752 "shouldn't still be in the CSet if evacuation didn't fail."); | |
3753 HeapWord* addr = (HeapWord*)thisOop; | |
3754 if (_g1->is_in_g1_reserved(addr)) | |
3755 _cm->grayRoot(oop(addr)); | |
3756 } | |
3757 } | |
3758 | |
3759 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
3760 size_t word_sz = old->size(); | |
3761 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
3762 // +1 to make the -1 indexes valid... | |
3763 int young_index = from_region->young_index_in_cset()+1; | |
3764 assert( (from_region->is_young() && young_index > 0) || | |
3765 (!from_region->is_young() && young_index == 0), "invariant" ); | |
3766 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
3767 markOop m = old->mark(); | |
3768 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, m->age(), | |
3769 word_sz); | |
3770 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
3771 oop obj = oop(obj_ptr); | |
3772 | |
3773 if (obj_ptr == NULL) { | |
3774 // This will either forward-to-self, or detect that someone else has | |
3775 // installed a forwarding pointer. | |
3776 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
3777 return _g1->handle_evacuation_failure_par(cl, old); | |
3778 } | |
3779 | |
3780 oop forward_ptr = old->forward_to_atomic(obj); | |
3781 if (forward_ptr == NULL) { | |
3782 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
3783 obj->set_mark(m); | |
3784 if (g1p->track_object_age(alloc_purpose)) { | |
3785 obj->incr_age(); | |
3786 } | |
3787 // preserve "next" mark bit | |
3788 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
3789 if (!use_local_bitmaps || | |
3790 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
3791 // if we couldn't mark it on the local bitmap (this happens when | |
3792 // the object was not allocated in the GCLab), we have to bite | |
3793 // the bullet and do the standard parallel mark | |
3794 _cm->markAndGrayObjectIfNecessary(obj); | |
3795 } | |
3796 #if 1 | |
3797 if (_g1->isMarkedNext(old)) { | |
3798 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
3799 } | |
3800 #endif | |
3801 } | |
3802 | |
3803 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
3804 surv_young_words[young_index] += word_sz; | |
3805 | |
3806 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
3807 arrayOop(old)->set_length(0); | |
3808 _par_scan_state->push_on_queue((oop*) ((intptr_t)old | G1_PARTIAL_ARRAY_MASK)); | |
3809 } else { | |
3810 _scanner->set_region(_g1->heap_region_containing(obj)); | |
3811 obj->oop_iterate_backwards(_scanner); | |
3812 } | |
3813 } else { | |
3814 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
3815 obj = forward_ptr; | |
3816 } | |
3817 return obj; | |
3818 } | |
3819 | |
3820 template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> | |
3821 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_forwardee>::do_oop_work(oop* p) { | |
3822 oop obj = *p; | |
3823 assert(barrier != G1BarrierRS || obj != NULL, | |
3824 "Precondition: G1BarrierRS implies obj is nonNull"); | |
3825 | |
3826 if (obj != NULL) { | |
3827 if (_g1->obj_in_cs(obj)) { | |
3828 #if G1_REM_SET_LOGGING | |
3829 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" into CS.", | |
3830 p, (void*) obj); | |
3831 #endif | |
3832 if (obj->is_forwarded()) { | |
3833 *p = obj->forwardee(); | |
3834 } else { | |
3835 *p = copy_to_survivor_space(obj); | |
3836 } | |
3837 // When scanning the RS, we only care about objs in CS. | |
3838 if (barrier == G1BarrierRS) { | |
3839 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); | |
3840 } | |
3841 } | |
3842 // When scanning moved objs, must look at all oops. | |
3843 if (barrier == G1BarrierEvac) { | |
3844 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); | |
3845 } | |
3846 | |
3847 if (do_gen_barrier) { | |
3848 par_do_barrier(p); | |
3849 } | |
3850 } | |
3851 } | |
3852 | |
3853 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); | |
3854 | |
3855 template <class T> void G1ParScanPartialArrayClosure::process_array_chunk( | |
3856 oop obj, int start, int end) { | |
3857 // process our set of indices (include header in first chunk) | |
3858 assert(start < end, "invariant"); | |
3859 T* const base = (T*)objArrayOop(obj)->base(); | |
3860 T* const start_addr = base + start; | |
3861 T* const end_addr = base + end; | |
3862 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); | |
3863 _scanner.set_region(_g1->heap_region_containing(obj)); | |
3864 obj->oop_iterate(&_scanner, mr); | |
3865 } | |
3866 | |
3867 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) { | |
3868 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops"); | |
3869 oop old = oop((intptr_t)p & ~G1_PARTIAL_ARRAY_MASK); | |
3870 assert(old->is_objArray(), "must be obj array"); | |
3871 assert(old->is_forwarded(), "must be forwarded"); | |
3872 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
3873 | |
3874 objArrayOop obj = objArrayOop(old->forwardee()); | |
3875 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
3876 // Process ParGCArrayScanChunk elements now | |
3877 // and push the remainder back onto queue | |
3878 int start = arrayOop(old)->length(); | |
3879 int end = obj->length(); | |
3880 int remainder = end - start; | |
3881 assert(start <= end, "just checking"); | |
3882 if (remainder > 2 * ParGCArrayScanChunk) { | |
3883 // Test above combines last partial chunk with a full chunk | |
3884 end = start + ParGCArrayScanChunk; | |
3885 arrayOop(old)->set_length(end); | |
3886 // Push remainder. | |
3887 _par_scan_state->push_on_queue((oop*) ((intptr_t) old | G1_PARTIAL_ARRAY_MASK)); | |
3888 } else { | |
3889 // Restore length so that the heap remains parsable in | |
3890 // case of evacuation failure. | |
3891 arrayOop(old)->set_length(end); | |
3892 } | |
3893 | |
3894 // process our set of indices (include header in first chunk) | |
3895 process_array_chunk<oop>(obj, start, end); | |
3896 oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr<oop>(start); | |
3897 oop* end_addr = (oop*)(obj->base()) + end; // obj_at_addr(end) asserts end < length | |
3898 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); | |
3899 _scanner.set_region(_g1->heap_region_containing(obj)); | |
3900 obj->oop_iterate(&_scanner, mr); | |
3901 } | |
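// Worked example of the chunking above (hypothetical numbers): suppose the
// forwarded array obj has length 1000, ParGCArrayScanChunk is 50, and the
// truncated length stored in old is currently 300. Then start == 300,
// end == 1000 and remainder == 700 > 2 * 50, so old's length is bumped to
// 350, the tagged entry is re-pushed, and only elements [300, 350) are
// scanned on this pass; once the remainder drops to 100 or fewer elements
// the final pass restores the full length and scans the tail in one go.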
3902 | |
3903 int G1ScanAndBalanceClosure::_nq = 0; | |
3904 | |
3905 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
3906 protected: | |
3907 G1CollectedHeap* _g1h; | |
3908 G1ParScanThreadState* _par_scan_state; | |
3909 RefToScanQueueSet* _queues; | |
3910 ParallelTaskTerminator* _terminator; | |
3911 | |
3912 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
3913 RefToScanQueueSet* queues() { return _queues; } | |
3914 ParallelTaskTerminator* terminator() { return _terminator; } | |
3915 | |
3916 public: | |
3917 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
3918 G1ParScanThreadState* par_scan_state, | |
3919 RefToScanQueueSet* queues, | |
3920 ParallelTaskTerminator* terminator) | |
3921 : _g1h(g1h), _par_scan_state(par_scan_state), | |
3922 _queues(queues), _terminator(terminator) {} | |
3923 | |
3924 void do_void() { | |
3925 G1ParScanThreadState* pss = par_scan_state(); | |
3926 while (true) { | |
3927 oop* ref_to_scan; | |
3928 pss->trim_queue(); | |
3929 IF_G1_DETAILED_STATS(pss->note_steal_attempt()); | |
3930 if (queues()->steal(pss->queue_num(), | |
3931 pss->hash_seed(), | |
3932 ref_to_scan)) { | |
3933 IF_G1_DETAILED_STATS(pss->note_steal()); | |
3934 pss->push_on_queue(ref_to_scan); | |
3935 continue; | |
3936 } | |
3937 pss->start_term_time(); | |
3938 if (terminator()->offer_termination()) break; | |
3939 pss->end_term_time(); | |
3940 } | |
3941 pss->end_term_time(); | |
3942 pss->retire_alloc_buffers(); | |
3943 } | |
3944 }; | |
3945 | |
3946 class G1ParTask : public AbstractGangTask { | |
3947 protected: | |
3948 G1CollectedHeap* _g1h; | |
3949 RefToScanQueueSet *_queues; | |
3950 ParallelTaskTerminator _terminator; | |
3951 | |
3952 Mutex _stats_lock; | |
3953 Mutex* stats_lock() { return &_stats_lock; } | |
3954 | |
3955 size_t getNCards() { | |
3956 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
3957 / G1BlockOffsetSharedArray::N_bytes; | |
3958 } | |
3959 | |
3960 public: | |
3961 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
3962 : AbstractGangTask("G1 collection"), | |
3963 _g1h(g1h), | |
3964 _queues(task_queues), | |
3965 _terminator(workers, _queues), | |
3966 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true) | |
3967 {} | |
3968 | |
3969 RefToScanQueueSet* queues() { return _queues; } | |
3970 | |
3971 RefToScanQueue *work_queue(int i) { | |
3972 return queues()->queue(i); | |
3973 } | |
3974 | |
3975 void work(int i) { | |
3976 ResourceMark rm; | |
3977 HandleMark hm; | |
3978 | |
3979 G1ParScanThreadState pss(_g1h, i); | |
3980 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
3981 G1ParScanHeapEvacClosure evac_failure_cl(_g1h, &pss); | |
3982 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
3983 | |
3984 pss.set_evac_closure(&scan_evac_cl); | |
3985 pss.set_evac_failure_closure(&evac_failure_cl); | |
3986 pss.set_partial_scan_closure(&partial_scan_cl); | |
3987 | |
3988 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
3989 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
3990 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
3991 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); | |
3992 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
3993 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
3994 | |
3995 OopsInHeapRegionClosure *scan_root_cl; | |
3996 OopsInHeapRegionClosure *scan_perm_cl; | |
3997 OopsInHeapRegionClosure *scan_so_cl; | |
3998 | |
3999 if (_g1h->g1_policy()->should_initiate_conc_mark()) { | |
4000 scan_root_cl = &scan_mark_root_cl; | |
4001 scan_perm_cl = &scan_mark_perm_cl; | |
4002 scan_so_cl = &scan_mark_heap_rs_cl; | |
4003 } else { | |
4004 scan_root_cl = &only_scan_root_cl; | |
4005 scan_perm_cl = &only_scan_perm_cl; | |
4006 scan_so_cl = &only_scan_heap_rs_cl; | |
4007 } | |
4008 | |
4009 pss.start_strong_roots(); | |
4010 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4011 SharedHeap::SO_AllClasses, | |
4012 scan_root_cl, | |
4013 &only_scan_heap_rs_cl, | |
4014 scan_so_cl, | |
4015 scan_perm_cl, | |
4016 i); | |
4017 pss.end_strong_roots(); | |
4018 { | |
4019 double start = os::elapsedTime(); | |
4020 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4021 evac.do_void(); | |
4022 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4023 double term_ms = pss.term_time()*1000.0; | |
4024 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
4025 _g1h->g1_policy()->record_termination_time(i, term_ms); | |
4026 } | |
4027 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); | |
4028 | |
4029 // Clean up any par-expanded rem sets. | |
4030 HeapRegionRemSet::par_cleanup(); | |
4031 | |
4032 MutexLocker x(stats_lock()); | |
4033 if (ParallelGCVerbose) { | |
4034 gclog_or_tty->print("Thread %d complete:\n", i); | |
4035 #if G1_DETAILED_STATS | |
4036 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", | |
4037 pss.pushes(), | |
4038 pss.pops(), | |
4039 pss.overflow_pushes(), | |
4040 pss.steals(), | |
4041 pss.steal_attempts()); | |
4042 #endif | |
4043 double elapsed = pss.elapsed(); | |
4044 double strong_roots = pss.strong_roots_time(); | |
4045 double term = pss.term_time(); | |
4046 gclog_or_tty->print(" Elapsed: %7.2f ms.\n" | |
4047 " Strong roots: %7.2f ms (%6.2f%%)\n" | |
4048 " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", | |
4049 elapsed * 1000.0, | |
4050 strong_roots * 1000.0, (strong_roots*100.0/elapsed), | |
4051 term * 1000.0, (term*100.0/elapsed), | |
4052 pss.term_attempts()); | |
4053 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); | |
4054 gclog_or_tty->print(" Waste: %8dK\n" | |
4055 " Alloc Buffer: %8dK\n" | |
4056 " Undo: %8dK\n", | |
4057 (total_waste * HeapWordSize) / K, | |
4058 (pss.alloc_buffer_waste() * HeapWordSize) / K, | |
4059 (pss.undo_waste() * HeapWordSize) / K); | |
4060 } | |
4061 | |
4062 assert(pss.refs_to_scan() == 0, "Task queue should be empty"); | |
4063 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); | |
4064 } | |
4065 }; | |
4066 | |
4067 // *** Common G1 Evacuation Stuff | |
4068 | |
4069 class G1CountClosure: public OopsInHeapRegionClosure { | |
4070 public: | |
4071 int n; | |
4072 G1CountClosure() : n(0) {} | |
4073 void do_oop(narrowOop* p) { | |
4074 guarantee(false, "NYI"); | |
4075 } | |
4076 void do_oop(oop* p) { | |
4077 oop obj = *p; | |
4078 assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj), | |
4079 "Rem set closure called on non-rem-set pointer."); | |
4080 n++; | |
4081 } | |
4082 }; | |
4083 | |
4084 static G1CountClosure count_closure; | |
4085 | |
4086 void | |
4087 G1CollectedHeap:: | |
4088 g1_process_strong_roots(bool collecting_perm_gen, | |
4089 SharedHeap::ScanningOption so, | |
4090 OopClosure* scan_non_heap_roots, | |
4091 OopsInHeapRegionClosure* scan_rs, | |
4092 OopsInHeapRegionClosure* scan_so, | |
4093 OopsInGenClosure* scan_perm, | |
4094 int worker_i) { | |
4095 // First scan the strong roots, including the perm gen. | |
4096 double ext_roots_start = os::elapsedTime(); | |
4097 double closure_app_time_sec = 0.0; | |
4098 | |
4099 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4100 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4101 buf_scan_perm.set_generation(perm_gen()); | |
4102 | |
4103 process_strong_roots(collecting_perm_gen, so, | |
4104 &buf_scan_non_heap_roots, | |
4105 &buf_scan_perm); | |
4106 // Finish up any enqueued closure apps. | |
4107 buf_scan_non_heap_roots.done(); | |
4108 buf_scan_perm.done(); | |
4109 double ext_roots_end = os::elapsedTime(); | |
4110 g1_policy()->reset_obj_copy_time(worker_i); | |
4111 double obj_copy_time_sec = | |
4112 buf_scan_non_heap_roots.closure_app_seconds() + | |
4113 buf_scan_perm.closure_app_seconds(); | |
4114 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4115 double ext_root_time_ms = | |
4116 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4117 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
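// Worked example of the timing bookkeeping above (hypothetical numbers):
// if the external-root phase took 12 ms of wall time and the buffering
// closures report 7 ms spent applying the copying closure, then 7 ms is
// recorded as object-copy time and ext_root_time_ms == 12 - 7 == 5 ms,
// so root discovery and object copying are not double-counted.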
4118 | |
4119 // Scan strong roots in mark stack. | |
4120 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4121 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4122 } | |
4123 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4124 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4125 | |
4126 // XXX What should this be doing in the parallel case? | |
4127 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4128 if (G1VerifyRemSet) { | |
4129 // :::: FIXME :::: | |
4130 // The stupid remembered set doesn't know how to filter out dead | |
4131 // objects, which the smart one does, and so, when it is created | |
4132 // and then compared, the number of entries in each differs and | |
4133 // the verification code fails. | |
4134 guarantee(false, "verification code is broken, see note"); | |
4135 | |
4136 // Let's make sure that the current rem set agrees with the stupidest | |
4137 // one possible! | |
4138 bool refs_enabled = ref_processor()->discovery_enabled(); | |
4139 if (refs_enabled) ref_processor()->disable_discovery(); | |
4140 StupidG1RemSet stupid(this); | |
4141 count_closure.n = 0; | |
4142 stupid.oops_into_collection_set_do(&count_closure, worker_i); | |
4143 int stupid_n = count_closure.n; | |
4144 count_closure.n = 0; | |
4145 g1_rem_set()->oops_into_collection_set_do(&count_closure, worker_i); | |
4146 guarantee(count_closure.n == stupid_n, "Old and new rem sets differ."); | |
4147 gclog_or_tty->print_cr("\nFound %d pointers in heap RS.", count_closure.n); | |
4148 if (refs_enabled) ref_processor()->enable_discovery(); | |
4149 } | |
4150 if (scan_so != NULL) { | |
4151 scan_scan_only_set(scan_so, worker_i); | |
4152 } | |
4153 // Now scan the complement of the collection set. | |
4154 if (scan_rs != NULL) { | |
4155 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4156 } | |
4157 // Finish with the ref_processor roots. | |
4158 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
4159 ref_processor()->oops_do(scan_non_heap_roots); | |
4160 } | |
4161 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4162 _process_strong_tasks->all_tasks_completed(); | |
4163 } | |
4164 | |
4165 void | |
4166 G1CollectedHeap::scan_scan_only_region(HeapRegion* r, | |
4167 OopsInHeapRegionClosure* oc, | |
4168 int worker_i) { | |
4169 HeapWord* startAddr = r->bottom(); | |
4170 HeapWord* endAddr = r->used_region().end(); | |
4171 | |
4172 oc->set_region(r); | |
4173 | |
4174 HeapWord* p = r->bottom(); | |
4175 HeapWord* t = r->top(); | |
4176 guarantee( p == r->next_top_at_mark_start(), "invariant" ); | |
4177 while (p < t) { | |
4178 oop obj = oop(p); | |
4179 p += obj->oop_iterate(oc); | |
4180 } | |
4181 } | |
4182 | |
4183 void | |
4184 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc, | |
4185 int worker_i) { | |
4186 double start = os::elapsedTime(); | |
4187 | |
4188 BufferingOopsInHeapRegionClosure boc(oc); | |
4189 | |
4190 FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc); | |
4191 FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark()); | |
4192 | |
4193 OopsInHeapRegionClosure *foc; | |
4194 if (g1_policy()->should_initiate_conc_mark()) | |
4195 foc = &scan_and_mark; | |
4196 else | |
4197 foc = &scan_only; | |
4198 | |
4199 HeapRegion* hr; | |
4200 int n = 0; | |
4201 while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) { | |
4202 scan_scan_only_region(hr, foc, worker_i); | |
4203 ++n; | |
4204 } | |
4205 boc.done(); | |
4206 | |
4207 double closure_app_s = boc.closure_app_seconds(); | |
4208 g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0); | |
4209 double ms = (os::elapsedTime() - start - closure_app_s)*1000.0; | |
4210 g1_policy()->record_scan_only_time(worker_i, ms, n); | |
4211 } | |
4212 | |
4213 void | |
4214 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4215 OopClosure* non_root_closure) { | |
4216 SharedHeap::process_weak_roots(root_closure, non_root_closure); | |
4217 } | |
4218 | |
4219 | |
4220 class SaveMarksClosure: public HeapRegionClosure { | |
4221 public: | |
4222 bool doHeapRegion(HeapRegion* r) { | |
4223 r->save_marks(); | |
4224 return false; | |
4225 } | |
4226 }; | |
4227 | |
4228 void G1CollectedHeap::save_marks() { | |
4229 if (ParallelGCThreads == 0) { | |
4230 SaveMarksClosure sm; | |
4231 heap_region_iterate(&sm); | |
4232 } | |
4233 // We do this even in the parallel case | |
4234 perm_gen()->save_marks(); | |
4235 } | |
4236 | |
4237 void G1CollectedHeap::evacuate_collection_set() { | |
4238 set_evacuation_failed(false); | |
4239 | |
4240 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4241 concurrent_g1_refine()->set_use_cache(false); | |
4242 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); | |
4243 | |
4244 set_par_threads(n_workers); | |
4245 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4246 | |
4247 init_for_evac_failure(NULL); | |
4248 | |
4249 change_strong_roots_parity(); // In preparation for parallel strong roots. | |
4250 rem_set()->prepare_for_younger_refs_iterate(true); | |
4251 double start_par = os::elapsedTime(); | |
4252 | |
4253 if (ParallelGCThreads > 0) { | |
4254 // The individual threads will set their evac-failure closures. | |
4255 workers()->run_task(&g1_par_task); | |
4256 } else { | |
4257 g1_par_task.work(0); | |
4258 } | |
4259 | |
4260 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4261 g1_policy()->record_par_time(par_time); | |
4262 set_par_threads(0); | |
4263 // Is this the right thing to do here? We don't save marks | |
4264 // on individual heap regions when we allocate from | |
4265 // them in parallel, so this seems like the correct place for this. | |
4266 all_alloc_regions_note_end_of_copying(); | |
4267 { | |
4268 G1IsAliveClosure is_alive(this); | |
4269 G1KeepAliveClosure keep_alive(this); | |
4270 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4271 } | |
4272 | |
4273 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); | |
4274 concurrent_g1_refine()->set_use_cache(true); | |
4275 | |
4276 finalize_for_evac_failure(); | |
4277 | |
4278 // Must do this before removing self-forwarding pointers, which clears | |
4279 // the per-region evac-failure flags. | |
4280 concurrent_mark()->complete_marking_in_collection_set(); | |
4281 | |
4282 if (evacuation_failed()) { | |
4283 remove_self_forwarding_pointers(); | |
4284 | |
4285 if (PrintGCDetails) { | |
4286 gclog_or_tty->print(" (evacuation failed)"); | |
4287 } else if (PrintGC) { | |
4288 gclog_or_tty->print("--"); | |
4289 } | |
4290 } | |
4291 | |
4292 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
4293 } | |
4294 | |
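// Free a single region, then immediately fold the freed-region counts into | |
// the heap-wide totals (a convenience wrapper around free_region_work and | |
// finish_free_region_work). | |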
4295 void G1CollectedHeap::free_region(HeapRegion* hr) { | |
4296 size_t pre_used = 0; | |
4297 size_t cleared_h_regions = 0; | |
4298 size_t freed_regions = 0; | |
4299 UncleanRegionList local_list; | |
4300 | |
4301 HeapWord* start = hr->bottom(); | |
4302 HeapWord* end = hr->prev_top_at_mark_start(); | |
4303 size_t used_bytes = hr->used(); | |
4304 size_t live_bytes = hr->max_live_bytes(); | |
4305 if (used_bytes > 0) { | |
4306 guarantee( live_bytes <= used_bytes, "invariant" ); | |
4307 } else { | |
4308 guarantee( live_bytes == 0, "invariant" ); | |
4309 } | |
4310 | |
4311 size_t garbage_bytes = used_bytes - live_bytes; | |
4312 if (garbage_bytes > 0) | |
4313 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); | |
4314 | |
4315 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, | |
4316 &local_list); | |
4317 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
4318 &local_list); | |
4319 } | |
4320 | |
4321 void | |
4322 G1CollectedHeap::free_region_work(HeapRegion* hr, | |
4323 size_t& pre_used, | |
4324 size_t& cleared_h_regions, | |
4325 size_t& freed_regions, | |
4326 UncleanRegionList* list, | |
4327 bool par) { | |
4328 assert(!hr->popular(), "should not free popular regions"); | |
4329 pre_used += hr->used(); | |
4330 if (hr->isHumongous()) { | |
4331 assert(hr->startsHumongous(), | |
4332 "Only the start of a humongous region should be freed."); | |
4333 int ind = _hrs->find(hr); | |
4334 assert(ind != -1, "Should have an index."); | |
4335 // Clear the start region. | |
4336 hr->hr_clear(par, true /*clear_space*/); | |
4337 list->insert_before_head(hr); | |
4338 cleared_h_regions++; | |
4339 freed_regions++; | |
4340 // Clear any continued regions. | |
4341 ind++; | |
4342 while ((size_t)ind < n_regions()) { | |
4343 HeapRegion* hrc = _hrs->at(ind); | |
4344 if (!hrc->continuesHumongous()) break; | |
4345 // Otherwise, it continues the humongous region. | |
4346 assert(hrc->humongous_start_region() == hr, "Huh?"); | |
4347 hrc->hr_clear(par, true /*clear_space*/); | |
4348 cleared_h_regions++; | |
4349 freed_regions++; | |
4350 list->insert_before_head(hrc); | |
4351 ind++; | |
4352 } | |
4353 } else { | |
4354 hr->hr_clear(par, true /*clear_space*/); | |
4355 list->insert_before_head(hr); | |
4356 freed_regions++; | |
4357 // If we're using clear2, this should not be enabled. | |
4358 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); | |
4359 } | |
4360 } | |
4361 | |
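// Prepend any locally collected unclean regions onto the shared unclean | |
// list and update the shared used/humongous/free counters, taking | |
// ParGCRareEvent_lock when called from a parallel context. | |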
4362 void G1CollectedHeap::finish_free_region_work(size_t pre_used, | |
4363 size_t cleared_h_regions, | |
4364 size_t freed_regions, | |
4365 UncleanRegionList* list) { | |
4366 if (list != NULL && list->sz() > 0) { | |
4367 prepend_region_list_on_unclean_list(list); | |
4368 } | |
4369 // Acquire a lock, if we're parallel, to update possibly-shared | |
4370 // variables. | |
4371 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; | |
4372 { | |
4373 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
4374 _summary_bytes_used -= pre_used; | |
4375 _num_humongous_regions -= (int) cleared_h_regions; | |
4376 _free_regions += freed_regions; | |
4377 } | |
4378 } | |
4379 | |
4380 | |
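// Walk the given young region list and re-dirty all card table entries | |
// covering each region. | |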
4381 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4382 while (list != NULL) { | |
4383 guarantee( list->is_young(), "invariant" ); | |
4384 | |
4385 HeapWord* bottom = list->bottom(); | |
4386 HeapWord* end = list->end(); | |
4387 MemRegion mr(bottom, end); | |
4388 ct_bs->dirty(mr); | |
4389 | |
4390 list = list->get_next_young_region(); | |
4391 } | |
4392 } | |
4393 | |
4394 void G1CollectedHeap::cleanUpCardTable() { | |
4395 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
4396 double start = os::elapsedTime(); | |
4397 | |
4398 ct_bs->clear(_g1_committed); | |
4399 | |
4400 // now, redirty the cards of the scan-only and survivor regions | |
4401 // (it seemed faster to do it this way, instead of iterating over | |
4402 // all regions and then clearing / dirtying as appropriate) | |
4403 dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region()); | |
4404 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); | |
4405 | |
4406 double elapsed = os::elapsedTime() - start; | |
4407 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
4408 } | |
4409 | |
4410 | |
4411 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { | |
4412 // First do any popular regions. | |
4413 HeapRegion* hr; | |
4414 while ((hr = popular_region_to_evac()) != NULL) { | |
4415 evac_popular_region(hr); | |
4416 } | |
4417 // Now do heuristic pauses. | |
4418 if (g1_policy()->should_do_collection_pause(word_size)) { | |
4419 do_collection_pause(); | |
4420 } | |
4421 } | |
4422 | |
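// Walk the collection set list: record surviving-word and remembered-set | |
// statistics for each region, free regions whose evacuation succeeded, | |
// and retain (as non-young) regions whose evacuation failed. | |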
4423 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
4424 double young_time_ms = 0.0; | |
4425 double non_young_time_ms = 0.0; | |
4426 | |
4427 G1CollectorPolicy* policy = g1_policy(); | |
4428 | |
4429 double start_sec = os::elapsedTime(); | |
4430 bool non_young = true; | |
4431 | |
4432 HeapRegion* cur = cs_head; | |
4434 size_t rs_lengths = 0; | |
4435 | |
4436 while (cur != NULL) { | |
4437 if (non_young) { | |
4438 if (cur->is_young()) { | |
4439 double end_sec = os::elapsedTime(); | |
4440 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4441 non_young_time_ms += elapsed_ms; | |
4442 | |
4443 start_sec = os::elapsedTime(); | |
4444 non_young = false; | |
4445 } | |
4446 } else { | |
4447 if (!cur->is_on_free_list()) { | |
4448 double end_sec = os::elapsedTime(); | |
4449 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4450 young_time_ms += elapsed_ms; | |
4451 | |
4452 start_sec = os::elapsedTime(); | |
4453 non_young = true; | |
4454 } | |
4455 } | |
4456 | |
4457 rs_lengths += cur->rem_set()->occupied(); | |
4458 | |
4459 HeapRegion* next = cur->next_in_collection_set(); | |
4460 assert(cur->in_collection_set(), "bad CS"); | |
4461 cur->set_next_in_collection_set(NULL); | |
4462 cur->set_in_collection_set(false); | |
4463 | |
4464 if (cur->is_young()) { | |
4465 int index = cur->young_index_in_cset(); | |
4466 guarantee( index != -1, "invariant" ); | |
4467 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
4468 size_t words_survived = _surviving_young_words[index]; | |
4469 cur->record_surv_words_in_group(words_survived); | |
4470 } else { | |
4471 int index = cur->young_index_in_cset(); | |
4472 guarantee( index == -1, "invariant" ); | |
4473 } | |
4474 | |
4475 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
4476 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
4477 "invariant" ); | |
4478 | |
4479 if (!cur->evacuation_failed()) { | |
4480 // The region should not be empty: empty regions are never added to a CS. | |
4481 assert(!cur->is_empty(), | |
4482 "Should not have empty regions in a CS."); | |
4483 free_region(cur); | |
4484 } else { | |
4485 guarantee( !cur->is_scan_only(), "should not be scan only" ); | |
4486 cur->uninstall_surv_rate_group(); | |
4487 if (cur->is_young()) | |
4488 cur->set_young_index_in_cset(-1); | |
4489 cur->set_not_young(); | |
4490 cur->set_evacuation_failed(false); | |
4491 } | |
4492 cur = next; | |
4493 } | |
4494 | |
4495 policy->record_max_rs_lengths(rs_lengths); | |
4496 policy->cset_regions_freed(); | |
4497 | |
4498 double end_sec = os::elapsedTime(); | |
4499 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4500 if (non_young) | |
4501 non_young_time_ms += elapsed_ms; | |
4502 else | |
4503 young_time_ms += elapsed_ms; | |
4504 | |
4505 policy->record_young_free_cset_time_ms(young_time_ms); | |
4506 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
4507 } | |
4508 | |
4509 HeapRegion* | |
4510 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { | |
4511 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4512 HeapRegion* res = pop_unclean_region_list_locked(); | |
4513 if (res != NULL) { | |
4514 assert(!res->continuesHumongous() && | |
4515 res->zero_fill_state() != HeapRegion::Allocated, | |
4516 "Only free regions on unclean list."); | |
4517 if (zero_filled) { | |
4518 res->ensure_zero_filled_locked(); | |
4519 res->set_zero_fill_allocated(); | |
4520 } | |
4521 } | |
4522 return res; | |
4523 } | |
4524 | |
4525 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { | |
4526 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4527 return alloc_region_from_unclean_list_locked(zero_filled); | |
4528 } | |
4529 | |
4530 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { | |
4531 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4532 put_region_on_unclean_list_locked(r); | |
4533 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4534 } | |
4535 | |
4536 void G1CollectedHeap::set_unclean_regions_coming(bool b) { | |
4537 MutexLockerEx x(Cleanup_mon); | |
4538 set_unclean_regions_coming_locked(b); | |
4539 } | |
4540 | |
4541 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { | |
4542 assert(Cleanup_mon->owned_by_self(), "Precondition"); | |
4543 _unclean_regions_coming = b; | |
4544 // Wake up mutator threads that might be waiting for completeCleanup to | |
4545 // finish. | |
4546 if (!b) Cleanup_mon->notify_all(); | |
4547 } | |
4548 | |
4549 void G1CollectedHeap::wait_for_cleanup_complete() { | |
4550 MutexLockerEx x(Cleanup_mon); | |
4551 wait_for_cleanup_complete_locked(); | |
4552 } | |
4553 | |
4554 void G1CollectedHeap::wait_for_cleanup_complete_locked() { | |
4555 assert(Cleanup_mon->owned_by_self(), "precondition"); | |
4556 while (_unclean_regions_coming) { | |
4557 Cleanup_mon->wait(); | |
4558 } | |
4559 } | |
4560 | |
4561 void | |
4562 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { | |
4563 assert(ZF_mon->owned_by_self(), "precondition."); | |
4564 _unclean_region_list.insert_before_head(r); | |
4565 } | |
4566 | |
4567 void | |
4568 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { | |
4569 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4570 prepend_region_list_on_unclean_list_locked(list); | |
4571 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4572 } | |
4573 | |
4574 void | |
4575 G1CollectedHeap:: | |
4576 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { | |
4577 assert(ZF_mon->owned_by_self(), "precondition."); | |
4578 _unclean_region_list.prepend_list(list); | |
4579 } | |
4580 | |
4581 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { | |
4582 assert(ZF_mon->owned_by_self(), "precondition."); | |
4583 HeapRegion* res = _unclean_region_list.pop(); | |
4584 if (res != NULL) { | |
4585 // Inform ZF thread that there's a new unclean head. | |
4586 if (_unclean_region_list.hd() != NULL && should_zf()) | |
4587 ZF_mon->notify_all(); | |
4588 } | |
4589 return res; | |
4590 } | |
4591 | |
4592 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { | |
4593 assert(ZF_mon->owned_by_self(), "precondition."); | |
4594 return _unclean_region_list.hd(); | |
4595 } | |
4596 | |
4597 | |
4598 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { | |
4599 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4600 HeapRegion* r = peek_unclean_region_list_locked(); | |
4601 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { | |
4602 // Result of below must be equal to "r", since we hold the lock. | |
4603 (void)pop_unclean_region_list_locked(); | |
4604 put_free_region_on_list_locked(r); | |
4605 return true; | |
4606 } else { | |
4607 return false; | |
4608 } | |
4609 } | |
4610 | |
4611 bool G1CollectedHeap::move_cleaned_region_to_free_list() { | |
4612 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4613 return move_cleaned_region_to_free_list_locked(); | |
4614 } | |
4615 | |
4616 | |
4617 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { | |
4618 assert(ZF_mon->owned_by_self(), "precondition."); | |
4619 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4620 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, | |
4621 "Regions on free list must be zero filled"); | |
4622 assert(!r->isHumongous(), "Must not be humongous."); | |
4623 assert(r->is_empty(), "Better be empty"); | |
4624 assert(!r->is_on_free_list(), | |
4625 "Better not already be on free list"); | |
4626 assert(!r->is_on_unclean_list(), | |
4627 "Better not already be on unclean list"); | |
4628 r->set_on_free_list(true); | |
4629 r->set_next_on_free_list(_free_region_list); | |
4630 _free_region_list = r; | |
4631 _free_region_list_size++; | |
4632 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4633 } | |
4634 | |
4635 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { | |
4636 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4637 put_free_region_on_list_locked(r); | |
4638 } | |
4639 | |
4640 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { | |
4641 assert(ZF_mon->owned_by_self(), "precondition."); | |
4642 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4643 HeapRegion* res = _free_region_list; | |
4644 if (res != NULL) { | |
4645 _free_region_list = res->next_from_free_list(); | |
4646 _free_region_list_size--; | |
4647 res->set_on_free_list(false); | |
4648 res->set_next_on_free_list(NULL); | |
4649 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4650 } | |
4651 return res; | |
4652 } | |
4653 | |
4654 | |
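// Allocate a free region: when a zero-filled region is requested the free | |
// list is tried before the unclean list, otherwise the unclean list is | |
// tried first; regions are zero-filled on demand. Returns NULL if both | |
// lists are exhausted. | |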
4655 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { | |
4656 // By self, or on behalf of self. | |
4657 assert(Heap_lock->is_locked(), "Precondition"); | |
4658 HeapRegion* res = NULL; | |
4659 bool first = true; | |
4660 while (res == NULL) { | |
4661 if (zero_filled || !first) { | |
4662 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4663 res = pop_free_region_list_locked(); | |
4664 if (res != NULL) { | |
4665 assert(!res->zero_fill_is_allocated(), | |
4666 "No allocated regions on free list."); | |
4667 res->set_zero_fill_allocated(); | |
4668 } else if (!first) { | |
4669 break; // We tried both, time to return NULL. | |
4670 } | |
4671 } | |
4672 | |
4673 if (res == NULL) { | |
4674 res = alloc_region_from_unclean_list(zero_filled); | |
4675 } | |
4676 assert(res == NULL || | |
4677 !zero_filled || | |
4678 res->zero_fill_is_allocated(), | |
4679 "We must have allocated the region we're returning"); | |
4680 first = false; | |
4681 } | |
4682 return res; | |
4683 } | |
4684 | |
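// Unlink every region whose zero-fill state is "Allocated" from both the | |
// unclean list and the free list, keeping the list-length accounting | |
// consistent. | |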
4685 void G1CollectedHeap::remove_allocated_regions_from_lists() { | |
4686 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4687 { | |
4688 HeapRegion* prev = NULL; | |
4689 HeapRegion* cur = _unclean_region_list.hd(); | |
4690 while (cur != NULL) { | |
4691 HeapRegion* next = cur->next_from_unclean_list(); | |
4692 if (cur->zero_fill_is_allocated()) { | |
4693 // Remove from the list. | |
4694 if (prev == NULL) { | |
4695 (void)_unclean_region_list.pop(); | |
4696 } else { | |
4697 _unclean_region_list.delete_after(prev); | |
4698 } | |
4699 cur->set_on_unclean_list(false); | |
4700 cur->set_next_on_unclean_list(NULL); | |
4701 } else { | |
4702 prev = cur; | |
4703 } | |
4704 cur = next; | |
4705 } | |
4706 assert(_unclean_region_list.sz() == unclean_region_list_length(), | |
4707 "Inv"); | |
4708 } | |
4709 | |
4710 { | |
4711 HeapRegion* prev = NULL; | |
4712 HeapRegion* cur = _free_region_list; | |
4713 while (cur != NULL) { | |
4714 HeapRegion* next = cur->next_from_free_list(); | |
4715 if (cur->zero_fill_is_allocated()) { | |
4716 // Remove from the list. | |
4717 if (prev == NULL) { | |
4718 _free_region_list = cur->next_from_free_list(); | |
4719 } else { | |
4720 prev->set_next_on_free_list(cur->next_from_free_list()); | |
4721 } | |
4722 cur->set_on_free_list(false); | |
4723 cur->set_next_on_free_list(NULL); | |
4724 _free_region_list_size--; | |
4725 } else { | |
4726 prev = cur; | |
4727 } | |
4728 cur = next; | |
4729 } | |
4730 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4731 } | |
4732 } | |
4733 | |
4734 bool G1CollectedHeap::verify_region_lists() { | |
4735 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4736 return verify_region_lists_locked(); | |
4737 } | |
4738 | |
4739 bool G1CollectedHeap::verify_region_lists_locked() { | |
4740 HeapRegion* unclean = _unclean_region_list.hd(); | |
4741 while (unclean != NULL) { | |
4742 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); | |
4743 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); | |
4744 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, | |
4745 "Everything else is possible."); | |
4746 unclean = unclean->next_from_unclean_list(); | |
4747 } | |
4748 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); | |
4749 | |
4750 HeapRegion* free_r = _free_region_list; | |
4751 while (free_r != NULL) { | |
4752 assert(free_r->is_on_free_list(), "Well, it is!"); | |
4753 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); | |
4754 switch (free_r->zero_fill_state()) { | |
4755 case HeapRegion::NotZeroFilled: | |
4756 case HeapRegion::ZeroFilling: | |
4757 guarantee(false, "Should not be on free list."); | |
4758 break; | |
4759 default: | |
4760 // Everything else is possible. | |
4761 break; | |
4762 } | |
4763 free_r = free_r->next_from_free_list(); | |
4764 } | |
4765 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); | |
4766 // If no guarantee above fired, the region lists are consistent. | |
4767 return true; | |
4768 } | |
4769 | |
4770 size_t G1CollectedHeap::free_region_list_length() { | |
4771 assert(ZF_mon->owned_by_self(), "precondition."); | |
4772 size_t len = 0; | |
4773 HeapRegion* cur = _free_region_list; | |
4774 while (cur != NULL) { | |
4775 len++; | |
4776 cur = cur->next_from_free_list(); | |
4777 } | |
4778 return len; | |
4779 } | |
4780 | |
4781 size_t G1CollectedHeap::unclean_region_list_length() { | |
4782 assert(ZF_mon->owned_by_self(), "precondition."); | |
4783 return _unclean_region_list.length(); | |
4784 } | |
4785 | |
4786 size_t G1CollectedHeap::n_regions() { | |
4787 return _hrs->length(); | |
4788 } | |
4789 | |
4790 size_t G1CollectedHeap::max_regions() { | |
4791 return | |
4792 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / | |
4793 HeapRegion::GrainBytes; | |
4794 } | |
4795 | |
4796 size_t G1CollectedHeap::free_regions() { | |
4797 /* Possibly-expensive assert. | |
4798 assert(_free_regions == count_free_regions(), | |
4799 "_free_regions is off."); | |
4800 */ | |
4801 return _free_regions; | |
4802 } | |
4803 | |
4804 bool G1CollectedHeap::should_zf() { | |
4805 return _free_region_list_size < (size_t) G1ConcZFMaxRegions; | |
4806 } | |
4807 | |
4808 class RegionCounter: public HeapRegionClosure { | |
4809 size_t _n; | |
4810 public: | |
4811 RegionCounter() : _n(0) {} | |
4812 bool doHeapRegion(HeapRegion* r) { | |
4813 if (r->is_empty() && !r->popular()) { | |
4814 assert(!r->isHumongous(), "H regions should not be empty."); | |
4815 _n++; | |
4816 } | |
4817 return false; | |
4818 } | |
4819 size_t res() { return _n; } | |
4820 }; | |
4821 | |
4822 size_t G1CollectedHeap::count_free_regions() { | |
4823 RegionCounter rc; | |
4824 heap_region_iterate(&rc); | |
4825 size_t n = rc.res(); | |
4826 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) | |
4827 n--; | |
4828 return n; | |
4829 } | |
4830 | |
4831 size_t G1CollectedHeap::count_free_regions_list() { | |
4832 size_t n = 0; | |
4834 ZF_mon->lock_without_safepoint_check(); | |
4835 HeapRegion* cur = _free_region_list; | |
4836 while (cur != NULL) { | |
4837 cur = cur->next_from_free_list(); | |
4838 n++; | |
4839 } | |
4840 size_t m = unclean_region_list_length(); | |
4841 ZF_mon->unlock(); | |
4842 return n + m; | |
4843 } | |
4844 | |
4845 bool G1CollectedHeap::should_set_young_locked() { | |
4846 assert(heap_lock_held_for_gc(), | |
4847 "the heap lock should already be held by or for this thread"); | |
4848 return (g1_policy()->in_young_gc_mode() && | |
4849 g1_policy()->should_add_next_region_to_young_list()); | |
4850 } | |
4851 | |
4852 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
4853 assert(heap_lock_held_for_gc(), | |
4854 "the heap lock should already be held by or for this thread"); | |
4855 _young_list->push_region(hr); | |
4856 g1_policy()->set_region_short_lived(hr); | |
4857 } | |
4858 | |
4859 class NoYoungRegionsClosure: public HeapRegionClosure { | |
4860 private: | |
4861 bool _success; | |
4862 public: | |
4863 NoYoungRegionsClosure() : _success(true) { } | |
4864 bool doHeapRegion(HeapRegion* r) { | |
4865 if (r->is_young()) { | |
4866 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
4867 r->bottom(), r->end()); | |
4868 _success = false; | |
4869 } | |
4870 return false; | |
4871 } | |
4872 bool success() { return _success; } | |
4873 }; | |
4874 | |
4875 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list, | |
4876 bool check_sample) { | |
4877 bool ret = true; | |
4878 | |
4879 ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample); | |
4880 if (!ignore_scan_only_list) { | |
4881 NoYoungRegionsClosure closure; | |
4882 heap_region_iterate(&closure); | |
4883 ret = ret && closure.success(); | |
4884 } | |
4885 | |
4886 return ret; | |
4887 } | |
4888 | |
4889 void G1CollectedHeap::empty_young_list() { | |
4890 assert(heap_lock_held_for_gc(), | |
4891 "the heap lock should already be held by or for this thread"); | |
4892 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
4893 | |
4894 _young_list->empty_list(); | |
4895 } | |
4896 | |
4897 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
4898 bool no_allocs = true; | |
4899 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
4900 HeapRegion* r = _gc_alloc_regions[ap]; | |
4901 no_allocs = r == NULL || r->saved_mark_at_top(); | |
4902 } | |
4903 return no_allocs; | |
4904 } | |
4905 | |
4906 void G1CollectedHeap::all_alloc_regions_note_end_of_copying() { | |
4907 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
4908 HeapRegion* r = _gc_alloc_regions[ap]; | |
4909 if (r != NULL) { | |
4910 // Check for aliases. | |
4911 bool has_processed_alias = false; | |
4912 for (int i = 0; i < ap; ++i) { | |
4913 if (_gc_alloc_regions[i] == r) { | |
4914 has_processed_alias = true; | |
4915 break; | |
4916 } | |
4917 } | |
4918 if (!has_processed_alias) { | |
4919 r->note_end_of_copying(); | |
4920 g1_policy()->record_after_bytes(r->used()); | |
4921 } | |
4922 } | |
4923 } | |
4924 } | |
4925 | |
4926 | |
4927 // Done at the start of full GC. | |
4928 void G1CollectedHeap::tear_down_region_lists() { | |
4929 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4930 while (pop_unclean_region_list_locked() != NULL) ; | |
4931 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, | |
4932 "Postconditions of loop.") | |
4933 while (pop_free_region_list_locked() != NULL) ; | |
4934 assert(_free_region_list == NULL, "Postcondition of loop."); | |
4935 if (_free_region_list_size != 0) { | |
4936 gclog_or_tty->print_cr("Size is %d.", _free_region_list_size); | |
4937 print(); | |
4938 } | |
4939 assert(_free_region_list_size == 0, "Postconditions of loop."); | |
4940 } | |
4941 | |
4942 | |
4943 class RegionResetter: public HeapRegionClosure { | |
4944 G1CollectedHeap* _g1; | |
4945 int _n; | |
4946 public: | |
4947 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
4948 bool doHeapRegion(HeapRegion* r) { | |
4949 if (r->continuesHumongous()) return false; | |
4950 if (r->top() > r->bottom()) { | |
4951 if (r->top() < r->end()) { | |
4952 Copy::fill_to_words(r->top(), | |
4953 pointer_delta(r->end(), r->top())); | |
4954 } | |
4955 r->set_zero_fill_allocated(); | |
4956 } else { | |
4957 assert(r->is_empty(), "tautology"); | |
4958 if (r->popular()) { | |
4959 if (r->zero_fill_state() != HeapRegion::Allocated) { | |
4960 r->ensure_zero_filled_locked(); | |
4961 r->set_zero_fill_allocated(); | |
4962 } | |
4963 } else { | |
4964 _n++; | |
4965 switch (r->zero_fill_state()) { | |
4966 case HeapRegion::NotZeroFilled: | |
4967 case HeapRegion::ZeroFilling: | |
4968 _g1->put_region_on_unclean_list_locked(r); | |
4969 break; | |
4970 case HeapRegion::Allocated: | |
4971 r->set_zero_fill_complete(); | |
4972 // no break; go on to put on free list. | |
4973 case HeapRegion::ZeroFilled: | |
4974 _g1->put_free_region_on_list_locked(r); | |
4975 break; | |
4976 } | |
4977 } | |
4978 } | |
4979 return false; | |
4980 } | |
4981 | |
4982 int getFreeRegionCount() {return _n;} | |
4983 }; | |
4984 | |
4985 // Done at the end of full GC. | |
4986 void G1CollectedHeap::rebuild_region_lists() { | |
4987 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4988 // This needs to go at the end of the full GC. | |
4989 RegionResetter rs; | |
4990 heap_region_iterate(&rs); | |
4991 _free_regions = rs.getFreeRegionCount(); | |
4992 // Tell the ZF thread it may have work to do. | |
4993 if (should_zf()) ZF_mon->notify_all(); | |
4994 } | |
4995 | |
4996 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { | |
4997 G1CollectedHeap* _g1; | |
4998 int _n; | |
4999 public: | |
5000 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5001 bool doHeapRegion(HeapRegion* r) { | |
5002 if (r->continuesHumongous()) return false; | |
5003 if (r->top() > r->bottom()) { | |
5004 // There are assertions in "set_zero_fill_needed()" below that | |
5005 // require top() == bottom(), so this is technically illegal. | |
5006 // We'll skirt the law here, by making that true temporarily. | |
5007 DEBUG_ONLY(HeapWord* save_top = r->top(); | |
5008 r->set_top(r->bottom())); | |
5009 r->set_zero_fill_needed(); | |
5010 DEBUG_ONLY(r->set_top(save_top)); | |
5011 } | |
5012 return false; | |
5013 } | |
5014 }; | |
5015 | |
5016 // Done at the start of full GC. | |
5017 void G1CollectedHeap::set_used_regions_to_need_zero_fill() { | |
5018 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5019 // This needs to be done at the start of the full GC. | |
5020 UsedRegionsNeedZeroFillSetter rs; | |
5021 heap_region_iterate(&rs); | |
5022 } | |
5023 | |
5024 class CountObjClosure: public ObjectClosure { | |
5025 size_t _n; | |
5026 public: | |
5027 CountObjClosure() : _n(0) {} | |
5028 void do_object(oop obj) { _n++; } | |
5029 size_t n() { return _n; } | |
5030 }; | |
5031 | |
5032 size_t G1CollectedHeap::pop_object_used_objs() { | |
5033 size_t sum_objs = 0; | |
5034 for (int i = 0; i < G1NumPopularRegions; i++) { | |
5035 CountObjClosure cl; | |
5036 _hrs->at(i)->object_iterate(&cl); | |
5037 sum_objs += cl.n(); | |
5038 } | |
5039 return sum_objs; | |
5040 } | |
5041 | |
5042 size_t G1CollectedHeap::pop_object_used_bytes() { | |
5043 size_t sum_bytes = 0; | |
5044 for (int i = 0; i < G1NumPopularRegions; i++) { | |
5045 sum_bytes += _hrs->at(i)->used(); | |
5046 } | |
5047 return sum_bytes; | |
5048 } | |
5049 | |
5050 | |
5052 | |
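// Bump-allocate "word_size" words in the current popular region, advancing | |
// to the next popular region when the current one fills up; for now the VM | |
// exits if all G1NumPopularRegions are exhausted. | |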
5053 HeapWord* G1CollectedHeap::allocate_popular_object(size_t word_size) { | |
5054 while (_cur_pop_hr_index < G1NumPopularRegions) { | |
5055 HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index); | |
5056 HeapWord* res = cur_pop_region->allocate(word_size); | |
5057 if (res != NULL) { | |
5058 // We account for popular objs directly in the used summary: | |
5059 _summary_bytes_used += (word_size * HeapWordSize); | |
5060 return res; | |
5061 } | |
5062 // Otherwise, try the next region (first making sure that we remember | |
5063 // the last "top" value as the "next_top_at_mark_start", so that | |
5064 // objects made popular during markings aren't automatically considered | |
5065 // live). | |
5066 cur_pop_region->note_end_of_copying(); | |
5068 _cur_pop_hr_index++; | |
5069 } | |
5070 // XXX: For now !!! | |
5071 vm_exit_out_of_memory(word_size, | |
5072 "Not enough pop obj space (To Be Fixed)"); | |
5073 return NULL; | |
5074 } | |
5075 | |
5076 class HeapRegionList: public CHeapObj { | |
5077 public: | |
5078 HeapRegion* hr; | |
5079 HeapRegionList* next; | |
5080 }; | |
5081 | |
5082 void G1CollectedHeap::schedule_popular_region_evac(HeapRegion* r) { | |
5083 // This might happen during parallel GC, so protect by this lock. | |
5084 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
5085 // We don't schedule regions whose evacuations are already pending, or | |
5086 // are already being evacuated. | |
5087 if (!r->popular_pending() && !r->in_collection_set()) { | |
5088 r->set_popular_pending(true); | |
5089 if (G1TracePopularity) { | |
5090 gclog_or_tty->print_cr("Scheduling region "PTR_FORMAT" " | |
5091 "["PTR_FORMAT", "PTR_FORMAT") for pop-object evacuation.", | |
5092 r, r->bottom(), r->end()); | |
5093 } | |
5094 HeapRegionList* hrl = new HeapRegionList; | |
5095 hrl->hr = r; | |
5096 hrl->next = _popular_regions_to_be_evacuated; | |
5097 _popular_regions_to_be_evacuated = hrl; | |
5098 } | |
5099 } | |
5100 | |
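// Pop the next region scheduled for popular-object evacuation, dropping | |
// (unscheduling) entries whose remembered sets are below G1RSPopLimit; | |
// returns NULL if nothing remains to evacuate. | |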
5101 HeapRegion* G1CollectedHeap::popular_region_to_evac() { | |
5102 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
5103 HeapRegion* res = NULL; | |
5104 while (_popular_regions_to_be_evacuated != NULL && res == NULL) { | |
5105 HeapRegionList* hrl = _popular_regions_to_be_evacuated; | |
5106 _popular_regions_to_be_evacuated = hrl->next; | |
5107 res = hrl->hr; | |
5108 // The G1RSPopLimit may have increased, so recheck here... | |
5109 if (res->rem_set()->occupied() < (size_t) G1RSPopLimit) { | |
5110 // Hah: don't need to schedule. | |
5111 if (G1TracePopularity) { | |
5112 gclog_or_tty->print_cr("Unscheduling region "PTR_FORMAT" " | |
5113 "["PTR_FORMAT", "PTR_FORMAT") " | |
5114 "for pop-object evacuation (size %d < limit %d)", | |
5115 res, res->bottom(), res->end(), | |
5116 res->rem_set()->occupied(), G1RSPopLimit); | |
5117 } | |
5118 res->set_popular_pending(false); | |
5119 res = NULL; | |
5120 } | |
5121 // We do not reset res->popular() here; if we did so, it would allow | |
5122 // the region to be "rescheduled" for popularity evacuation. Instead, | |
5123 // this is done in the collection pause, with the world stopped. | |
5124 // So the invariant is that the regions in the list have the popularity | |
5125 // boolean set, but having the boolean set does not imply membership | |
5126 // on the list (though there can at most one such pop-pending region | |
5127 // not on the list at any time). | |
5128 delete hrl; | |
5129 } | |
5130 return res; | |
5131 } | |
5132 | |
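// Request a stop-the-world popularity pause for "hr", retrying until the | |
// pause's prologue succeeds or the region's remembered set no longer | |
// reaches G1RSPopLimit. | |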
5133 void G1CollectedHeap::evac_popular_region(HeapRegion* hr) { | |
5134 while (true) { | |
5135 // Don't want to do a GC pause while cleanup is being completed! | |
5136 wait_for_cleanup_complete(); | |
5137 | |
5138 // Read the GC count while holding the Heap_lock | |
5139 int gc_count_before = SharedHeap::heap()->total_collections(); | |
5140 g1_policy()->record_stop_world_start(); | |
5141 | |
5142 { | |
5143 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
5144 VM_G1PopRegionCollectionPause op(gc_count_before, hr); | |
5145 VMThread::execute(&op); | |
5146 | |
5147 // If the prolog succeeded, we didn't do a GC for this. | |
5148 if (op.prologue_succeeded()) break; | |
5149 } | |
5150 // Otherwise we didn't. We should recheck the size, though, since | |
5151 // the limit may have increased... | |
5152 if (hr->rem_set()->occupied() < (size_t) G1RSPopLimit) { | |
5153 hr->set_popular_pending(false); | |
5154 break; | |
5155 } | |
5156 } | |
5157 } | |
5158 | |
5159 void G1CollectedHeap::atomic_inc_obj_rc(oop obj) { | |
5160 Atomic::inc(obj_rc_addr(obj)); | |
5161 } | |
5162 | |
5163 class CountRCClosure: public OopsInHeapRegionClosure { | |
5164 G1CollectedHeap* _g1h; | |
5165 bool _parallel; | |
5166 public: | |
5167 CountRCClosure(G1CollectedHeap* g1h) : | |
5168 _g1h(g1h), _parallel(ParallelGCThreads > 0) | |
5169 {} | |
5170 void do_oop(narrowOop* p) { | |
5171 guarantee(false, "NYI"); | |
5172 } | |
5173 void do_oop(oop* p) { | |
5174 oop obj = *p; | |
5175 assert(obj != NULL, "Precondition."); | |
5176 if (_parallel) { | |
5177 // We go sticky at the limit to avoid excess contention. | |
5178 // If we want to track the actual RC's further, we'll need to keep a | |
5179 // per-thread hash table or something for the popular objects. | |
5180 if (_g1h->obj_rc(obj) < G1ObjPopLimit) { | |
5181 _g1h->atomic_inc_obj_rc(obj); | |
5182 } | |
5183 } else { | |
5184 _g1h->inc_obj_rc(obj); | |
5185 } | |
5186 } | |
5187 }; | |
5188 | |
5189 class EvacPopObjClosure: public ObjectClosure { | |
5190 G1CollectedHeap* _g1h; | |
5191 size_t _pop_objs; | |
5192 size_t _max_rc; | |
5193 public: | |
5194 EvacPopObjClosure(G1CollectedHeap* g1h) : | |
5195 _g1h(g1h), _pop_objs(0), _max_rc(0) {} | |
5196 | |
5197 void do_object(oop obj) { | |
5198 size_t rc = _g1h->obj_rc(obj); | |
5199 _max_rc = MAX2(rc, _max_rc); | |
5200 if (rc >= (size_t) G1ObjPopLimit) { | |
5201 _g1h->_pop_obj_rc_at_copy.add((double)rc); | |
5202 size_t word_sz = obj->size(); | |
5203 HeapWord* new_pop_loc = _g1h->allocate_popular_object(word_sz); | |
5204 oop new_pop_obj = (oop)new_pop_loc; | |
5205 Copy::aligned_disjoint_words((HeapWord*)obj, new_pop_loc, word_sz); | |
5206 obj->forward_to(new_pop_obj); | |
5207 G1ScanAndBalanceClosure scan_and_balance(_g1h); | |
5208 new_pop_obj->oop_iterate_backwards(&scan_and_balance); | |
5209 // preserve "next" mark bit if marking is in progress. | |
5210 if (_g1h->mark_in_progress() && !_g1h->is_obj_ill(obj)) { | |
5211 _g1h->concurrent_mark()->markAndGrayObjectIfNecessary(new_pop_obj); | |
5212 } | |
5213 | |
5214 if (G1TracePopularity) { | |
5215 gclog_or_tty->print_cr("Found obj " PTR_FORMAT " of word size " SIZE_FORMAT | |
5216 " pop (%d), move to " PTR_FORMAT, | |
5217 (void*) obj, word_sz, | |
5218 _g1h->obj_rc(obj), (void*) new_pop_obj); | |
5219 } | |
5220 _pop_objs++; | |
5221 } | |
5222 } | |
5223 size_t pop_objs() { return _pop_objs; } | |
5224 size_t max_rc() { return _max_rc; } | |
5225 }; | |
5226 | |
5227 class G1ParCountRCTask : public AbstractGangTask { | |
5228 G1CollectedHeap* _g1h; | |
5229 BitMap _bm; | |
5230 | |
5231 size_t getNCards() { | |
5232 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
5233 / G1BlockOffsetSharedArray::N_bytes; | |
5234 } | |
5235 CountRCClosure _count_rc_closure; | |
5236 public: | |
5237 G1ParCountRCTask(G1CollectedHeap* g1h) : | |
5238 AbstractGangTask("G1 Par RC Count task"), | |
5239 _g1h(g1h), _bm(getNCards()), _count_rc_closure(g1h) | |
5240 {} | |
5241 | |
5242 void work(int i) { | |
5243 ResourceMark rm; | |
5244 HandleMark hm; | |
5245 _g1h->g1_rem_set()->oops_into_collection_set_do(&_count_rc_closure, i); | |
5246 } | |
5247 }; | |
5248 | |
5249 void G1CollectedHeap::popularity_pause_preamble(HeapRegion* popular_region) { | |
5250 // We're evacuating a single region (for popularity). | |
5251 if (G1TracePopularity) { | |
5252 gclog_or_tty->print_cr("Doing pop region pause for ["PTR_FORMAT", "PTR_FORMAT")", | |
5253 popular_region->bottom(), popular_region->end()); | |
5254 } | |
5255 g1_policy()->set_single_region_collection_set(popular_region); | |
5256 size_t max_rc; | |
5257 if (!compute_reference_counts_and_evac_popular(popular_region, | |
5258 &max_rc)) { | |
5259 // We didn't evacuate any popular objects. | |
5260 // We increase the RS popularity limit, to prevent this from | |
5261 // happening in the future. | |
5262 if (G1RSPopLimit < (1 << 30)) { | |
5263 G1RSPopLimit *= 2; | |
5264 } | |
5265 // For now, interesting enough for a message: | |
5266 #if 1 | |
5267 gclog_or_tty->print_cr("In pop region pause for ["PTR_FORMAT", "PTR_FORMAT"), " | |
5268 "failed to find a pop object (max = %d).", | |
5269 popular_region->bottom(), popular_region->end(), | |
5270 max_rc); | |
5271 gclog_or_tty->print_cr("Increased G1RSPopLimit to %d.", G1RSPopLimit); | |
5272 #endif // 1 | |
5273 // Also, we reset the collection set to NULL, to make the rest of | |
5274 // the collection do nothing. | |
5275 assert(popular_region->next_in_collection_set() == NULL, | |
5276 "should be single-region."); | |
5277 popular_region->set_in_collection_set(false); | |
5278 popular_region->set_popular_pending(false); | |
5279 g1_policy()->clear_collection_set(); | |
5280 } | |
5281 } | |
5282 | |
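// Count incoming references to objects in "popular_region" (using a side | |
// "RC region" as scratch), then evacuate every object whose count reached | |
// G1ObjPopLimit into the popular space. Returns true if any object was | |
// evacuated; *max_rc is set to the largest count seen. | |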
5283 bool G1CollectedHeap:: | |
5284 compute_reference_counts_and_evac_popular(HeapRegion* popular_region, | |
5285 size_t* max_rc) { | |
5286 HeapWord* rc_region_bot; | |
5287 HeapWord* rc_region_end; | |
5288 | |
5289 // Set up the reference count region. | |
5290 HeapRegion* rc_region = newAllocRegion(HeapRegion::GrainWords); | |
5291 if (rc_region != NULL) { | |
5292 rc_region_bot = rc_region->bottom(); | |
5293 rc_region_end = rc_region->end(); | |
5294 } else { | |
5295 rc_region_bot = NEW_C_HEAP_ARRAY(HeapWord, HeapRegion::GrainWords); | |
5296 if (rc_region_bot == NULL) { | |
5297 vm_exit_out_of_memory(HeapRegion::GrainWords, | |
5298 "No space for RC region."); | |
5299 } | |
5300 rc_region_end = rc_region_bot + HeapRegion::GrainWords; | |
5301 } | |
5302 | |
5303 if (G1TracePopularity) | |
5304 gclog_or_tty->print_cr("RC region is ["PTR_FORMAT", "PTR_FORMAT")", | |
5305 rc_region_bot, rc_region_end); | |
5306 if (rc_region_bot > popular_region->bottom()) { | |
5307 _rc_region_above = true; | |
5308 _rc_region_diff = | |
5309 pointer_delta(rc_region_bot, popular_region->bottom(), 1); | |
5310 } else { | |
5311 assert(rc_region_bot < popular_region->bottom(), "Can't be equal."); | |
5312 _rc_region_above = false; | |
5313 _rc_region_diff = | |
5314 pointer_delta(popular_region->bottom(), rc_region_bot, 1); | |
5315 } | |
5316 g1_policy()->record_pop_compute_rc_start(); | |
5317 // Count external references. | |
5318 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
5319 if (ParallelGCThreads > 0) { | |
5320 | |
5321 set_par_threads(workers()->total_workers()); | |
5322 G1ParCountRCTask par_count_rc_task(this); | |
5323 workers()->run_task(&par_count_rc_task); | |
5324 set_par_threads(0); | |
5325 | |
5326 } else { | |
5327 CountRCClosure count_rc_closure(this); | |
5328 g1_rem_set()->oops_into_collection_set_do(&count_rc_closure, 0); | |
5329 } | |
5330 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); | |
5331 g1_policy()->record_pop_compute_rc_end(); | |
5332 | |
5333 // Now evacuate popular objects. | |
5334 g1_policy()->record_pop_evac_start(); | |
5335 EvacPopObjClosure evac_pop_obj_cl(this); | |
5336 popular_region->object_iterate(&evac_pop_obj_cl); | |
5337 *max_rc = evac_pop_obj_cl.max_rc(); | |
5338 | |
5339 // Make sure the last "top" value of the current popular region is copied | |
5340 // as the "next_top_at_mark_start", so that objects made popular during | |
5341 // markings aren't automatically considered live. | |
5342 HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index); | |
5343 cur_pop_region->note_end_of_copying(); | |
5344 | |
5345 if (rc_region != NULL) { | |
5346 free_region(rc_region); | |
5347 } else { | |
5348 FREE_C_HEAP_ARRAY(HeapWord, rc_region_bot); | |
5349 } | |
5350 g1_policy()->record_pop_evac_end(); | |
5351 | |
5352 return evac_pop_obj_cl.pop_objs() > 0; | |
5353 } | |
5354 | |
5355 class CountPopObjInfoClosure: public HeapRegionClosure { | |
5356 size_t _objs; | |
5357 size_t _bytes; | |
5358 | |
5359 class CountObjClosure: public ObjectClosure { | |
5360 size_t _n; | |
5361 public: | |
5362 CountObjClosure() : _n(0) {} | |
5363 void do_object(oop obj) { _n++; } | |
5364 size_t n() { return _n; } | |
5365 }; | |
5366 | |
5367 public: | |
5368 CountPopObjInfoClosure() : _objs(0), _bytes(0) {} | |
5369 bool doHeapRegion(HeapRegion* r) { | |
5370 _bytes += r->used(); | |
5371 CountObjClosure blk; | |
5372 r->object_iterate(&blk); | |
5373 _objs += blk.n(); | |
5374 return false; | |
5375 } | |
5376 size_t objs() { return _objs; } | |
5377 size_t bytes() { return _bytes; } | |
5378 }; | |
5379 | |
5380 | |
5381 void G1CollectedHeap::print_popularity_summary_info() const { | |
5382 CountPopObjInfoClosure blk; | |
5383 for (int i = 0; i <= _cur_pop_hr_index; i++) { | |
5384 blk.doHeapRegion(_hrs->at(i)); | |
5385 } | |
5386 gclog_or_tty->print_cr("\nPopular objects: %d objs, %d bytes.", | |
5387 blk.objs(), blk.bytes()); | |
5388 gclog_or_tty->print_cr(" RC at copy = [avg = %5.2f, max = %5.2f, sd = %5.2f].", | |
5389 _pop_obj_rc_at_copy.avg(), | |
5390 _pop_obj_rc_at_copy.maximum(), | |
5391 _pop_obj_rc_at_copy.sd()); | |
5392 } | |
5393 | |
5394 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
5395 _refine_cte_cl->set_concurrent(concurrent); | |
5396 } | |
5397 | |
5398 #ifndef PRODUCT | |
5399 | |
5400 class PrintHeapRegionClosure: public HeapRegionClosure { | |
5401 public: | |
5402 bool doHeapRegion(HeapRegion *r) { | |
5403 gclog_or_tty->print("Region: "PTR_FORMAT":", r); | |
5404 if (r != NULL) { | |
5405 if (r->is_on_free_list()) | |
5406 gclog_or_tty->print("Free "); | |
5407 if (r->is_young()) | |
5408 gclog_or_tty->print("Young "); | |
5409 if (r->isHumongous()) | |
5410 gclog_or_tty->print("Is Humongous "); | |
5411 r->print(); | |
5412 } | |
5413 return false; | |
5414 } | |
5415 }; | |
5416 | |
5417 class SortHeapRegionClosure : public HeapRegionClosure { | |
5418 size_t young_regions, free_regions, unclean_regions; | |
5419 size_t hum_regions, count; | |
5420 size_t unaccounted, cur_alloc; | |
5421 size_t total_free; | |
5422 HeapRegion* cur; | |
5423 public: | |
5424 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), | |
5425 free_regions(0), unclean_regions(0), | |
5426 hum_regions(0), | |
5427 count(0), unaccounted(0), | |
5428 cur_alloc(0), total_free(0) | |
5429 {} | |
5430 bool doHeapRegion(HeapRegion *r) { | |
5431 count++; | |
5432 if (r->is_on_free_list()) free_regions++; | |
5433 else if (r->is_on_unclean_list()) unclean_regions++; | |
5434 else if (r->isHumongous()) hum_regions++; | |
5435 else if (r->is_young()) young_regions++; | |
5436 else if (r == cur) cur_alloc++; | |
5437 else unaccounted++; | |
5438 return false; | |
5439 } | |
5440 void print() { | |
5441 total_free = free_regions + unclean_regions; | |
5442 gclog_or_tty->print("%d regions\n", count); | |
5443 gclog_or_tty->print("%d free: free_list = %d unclean = %d\n", | |
5444 total_free, free_regions, unclean_regions); | |
5445 gclog_or_tty->print("%d humongous %d young\n", | |
5446 hum_regions, young_regions); | |
5447 gclog_or_tty->print("%d cur_alloc\n", cur_alloc); | |
5448 gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted); | |
5449 } | |
5450 }; | |
5451 | |
5452 void G1CollectedHeap::print_region_counts() { | |
5453 SortHeapRegionClosure sc(_cur_alloc_region); | |
5454 PrintHeapRegionClosure cl; | |
5455 heap_region_iterate(&cl); | |
5456 heap_region_iterate(&sc); | |
5457 sc.print(); | |
5458 print_region_accounting_info(); | |
5459 } | |
5460 | |
5461 bool G1CollectedHeap::regions_accounted_for() { | |
5462 // TODO: regions accounting for young/survivor/tenured | |
5463 return true; | |
5464 } | |
5465 | |
5466 bool G1CollectedHeap::print_region_accounting_info() { | |
5467 gclog_or_tty->print_cr("P regions: %d.", G1NumPopularRegions); | |
5468 gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).", | |
5469 free_regions(), | |
5470 count_free_regions(), count_free_regions_list(), | |
5471 _free_region_list_size, _unclean_region_list.sz()); | |
5472 gclog_or_tty->print_cr("cur_alloc: %d.", | |
5473 (_cur_alloc_region == NULL ? 0 : 1)); | |
5474 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); | |
5475 | |
5476 // TODO: check regions accounting for young/survivor/tenured | |
5477 return true; | |
5478 } | |
5479 | |
5480 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5481 HeapRegion* hr = heap_region_containing(p); | |
5482 if (hr == NULL) { | |
5483 return is_in_permanent(p); | |
5484 } else { | |
5485 return hr->is_in(p); | |
5486 } | |
5487 } | |
5488 #endif // PRODUCT | |
5489 | |
5490 void G1CollectedHeap::g1_unimplemented() { | |
5491 // Unimplemented(); | |
5492 } | |
5493 | |
5494 | |
5495 // Local Variables: *** | |
5496 // c-indentation-style: gnu *** | |
5497 // End: *** |