Mercurial > hg > graal-compiler
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 453:c96030fff130
6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
author | ysr |
---|---|
date | Thu, 20 Nov 2008 16:56:09 -0800 |
parents | 078b8a0d8d7c |
children | 27a80744a83b |
rev | line source |
---|---|
342 | 1 /* |
2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_g1CollectedHeap.cpp.incl" | |
27 | |
28 // turn it on so that the contents of the young list (scan-only / | |
29 // to-be-collected) are printed at "strategic" points before / during | |
30 // / after the collection --- this is useful for debugging | |
31 #define SCAN_ONLY_VERBOSE 0 | |
32 // CURRENT STATUS | |
33 // This file is under construction. Search for "FIXME". | |
34 | |
35 // INVARIANTS/NOTES | |
36 // | |
37 // All allocation activity covered by the G1CollectedHeap interface is | |
38 // serialized by acquiring the HeapLock. This happens in | |
39 // mem_allocate_work, which all such allocation functions call. | |
40 // (Note that this does not apply to TLAB allocation, which is not part | |
41 // of this interface: it is done by clients of this interface.) | |
42 | |
43 // Local to this file. | |
44 | |
45 // Finds the first HeapRegion. | |
46 // No longer used, but might be handy someday. | |
47 | |
48 class FindFirstRegionClosure: public HeapRegionClosure { | |
49 HeapRegion* _a_region; | |
50 public: | |
51 FindFirstRegionClosure() : _a_region(NULL) {} | |
52 bool doHeapRegion(HeapRegion* r) { | |
53 _a_region = r; | |
54 return true; | |
55 } | |
56 HeapRegion* result() { return _a_region; } | |
57 }; | |
58 | |
59 | |
60 class RefineCardTableEntryClosure: public CardTableEntryClosure { | |
61 SuspendibleThreadSet* _sts; | |
62 G1RemSet* _g1rs; | |
63 ConcurrentG1Refine* _cg1r; | |
64 bool _concurrent; | |
65 public: | |
66 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, | |
67 G1RemSet* g1rs, | |
68 ConcurrentG1Refine* cg1r) : | |
69 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) | |
70 {} | |
71 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
72 _g1rs->concurrentRefineOneCard(card_ptr, worker_i); | |
73 if (_concurrent && _sts->should_yield()) { | |
74 // Caller will actually yield. | |
75 return false; | |
76 } | |
77 // Otherwise, we finished successfully; return true. | |
78 return true; | |
79 } | |
80 void set_concurrent(bool b) { _concurrent = b; } | |
81 }; | |
82 | |
83 | |
84 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
85 int _calls; | |
86 G1CollectedHeap* _g1h; | |
87 CardTableModRefBS* _ctbs; | |
88 int _histo[256]; | |
89 public: | |
90 ClearLoggedCardTableEntryClosure() : | |
91 _calls(0) | |
92 { | |
93 _g1h = G1CollectedHeap::heap(); | |
94 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
95 for (int i = 0; i < 256; i++) _histo[i] = 0; | |
96 } | |
97 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
98 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
99 _calls++; | |
100 unsigned char* ujb = (unsigned char*)card_ptr; | |
101 int ind = (int)(*ujb); | |
102 _histo[ind]++; | |
103 *card_ptr = -1; | |
104 } | |
105 return true; | |
106 } | |
107 int calls() { return _calls; } | |
108 void print_histo() { | |
109 gclog_or_tty->print_cr("Card table value histogram:"); | |
110 for (int i = 0; i < 256; i++) { | |
111 if (_histo[i] != 0) { | |
112 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); | |
113 } | |
114 } | |
115 } | |
116 }; | |
117 | |
118 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { | |
119 int _calls; | |
120 G1CollectedHeap* _g1h; | |
121 CardTableModRefBS* _ctbs; | |
122 public: | |
123 RedirtyLoggedCardTableEntryClosure() : | |
124 _calls(0) | |
125 { | |
126 _g1h = G1CollectedHeap::heap(); | |
127 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); | |
128 } | |
129 bool do_card_ptr(jbyte* card_ptr, int worker_i) { | |
130 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { | |
131 _calls++; | |
132 *card_ptr = 0; | |
133 } | |
134 return true; | |
135 } | |
136 int calls() { return _calls; } | |
137 }; | |
138 | |
139 YoungList::YoungList(G1CollectedHeap* g1h) | |
140 : _g1h(g1h), _head(NULL), | |
141 _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL), | |
142 _length(0), _scan_only_length(0), | |
143 _last_sampled_rs_lengths(0), | |
144 _survivor_head(NULL), _survivors_tail(NULL), _survivor_length(0) | |
145 { | |
146 guarantee( check_list_empty(false), "just making sure..." ); | |
147 } | |
148 | |
149 void YoungList::push_region(HeapRegion *hr) { | |
150 assert(!hr->is_young(), "should not already be young"); | |
151 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
152 | |
153 hr->set_next_young_region(_head); | |
154 _head = hr; | |
155 | |
156 hr->set_young(); | |
157 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); | |
158 ++_length; | |
159 } | |
160 | |
161 void YoungList::add_survivor_region(HeapRegion* hr) { | |
162 assert(!hr->is_survivor(), "should not already be for survived"); | |
163 assert(hr->get_next_young_region() == NULL, "cause it should!"); | |
164 | |
165 hr->set_next_young_region(_survivor_head); | |
166 if (_survivor_head == NULL) { | |
167 _survivors_tail = hr; | |
168 } | |
169 _survivor_head = hr; | |
170 | |
171 hr->set_survivor(); | |
172 ++_survivor_length; | |
173 } | |
174 | |
175 HeapRegion* YoungList::pop_region() { | |
176 while (_head != NULL) { | |
177 assert( length() > 0, "list should not be empty" ); | |
178 HeapRegion* ret = _head; | |
179 _head = ret->get_next_young_region(); | |
180 ret->set_next_young_region(NULL); | |
181 --_length; | |
182 assert(ret->is_young(), "region should be very young"); | |
183 | |
184 // Replace 'Survivor' region type with 'Young'. So the region will | |
185 // be treated as a young region and will not be 'confused' with | |
186 // newly created survivor regions. | |
187 if (ret->is_survivor()) { | |
188 ret->set_young(); | |
189 } | |
190 | |
191 if (!ret->is_scan_only()) { | |
192 return ret; | |
193 } | |
194 | |
195 // scan-only, we'll add it to the scan-only list | |
196 if (_scan_only_tail == NULL) { | |
197 guarantee( _scan_only_head == NULL, "invariant" ); | |
198 | |
199 _scan_only_head = ret; | |
200 _curr_scan_only = ret; | |
201 } else { | |
202 guarantee( _scan_only_head != NULL, "invariant" ); | |
203 _scan_only_tail->set_next_young_region(ret); | |
204 } | |
205 guarantee( ret->get_next_young_region() == NULL, "invariant" ); | |
206 _scan_only_tail = ret; | |
207 | |
208 // no need to be tagged as scan-only any more | |
209 ret->set_young(); | |
210 | |
211 ++_scan_only_length; | |
212 } | |
213 assert( length() == 0, "list should be empty" ); | |
214 return NULL; | |
215 } | |
216 | |
217 void YoungList::empty_list(HeapRegion* list) { | |
218 while (list != NULL) { | |
219 HeapRegion* next = list->get_next_young_region(); | |
220 list->set_next_young_region(NULL); | |
221 list->uninstall_surv_rate_group(); | |
222 list->set_not_young(); | |
223 list = next; | |
224 } | |
225 } | |
226 | |
227 void YoungList::empty_list() { | |
228 assert(check_list_well_formed(), "young list should be well formed"); | |
229 | |
230 empty_list(_head); | |
231 _head = NULL; | |
232 _length = 0; | |
233 | |
234 empty_list(_scan_only_head); | |
235 _scan_only_head = NULL; | |
236 _scan_only_tail = NULL; | |
237 _scan_only_length = 0; | |
238 _curr_scan_only = NULL; | |
239 | |
240 empty_list(_survivor_head); | |
241 _survivor_head = NULL; | |
242 _survivors_tail = NULL; | |
243 _survivor_length = 0; | |
244 | |
245 _last_sampled_rs_lengths = 0; | |
246 | |
247 assert(check_list_empty(false), "just making sure..."); | |
248 } | |
249 | |
250 bool YoungList::check_list_well_formed() { | |
251 bool ret = true; | |
252 | |
253 size_t length = 0; | |
254 HeapRegion* curr = _head; | |
255 HeapRegion* last = NULL; | |
256 while (curr != NULL) { | |
257 if (!curr->is_young() || curr->is_scan_only()) { | |
258 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " | |
259 "incorrectly tagged (%d, %d)", | |
260 curr->bottom(), curr->end(), | |
261 curr->is_young(), curr->is_scan_only()); | |
262 ret = false; | |
263 } | |
264 ++length; | |
265 last = curr; | |
266 curr = curr->get_next_young_region(); | |
267 } | |
268 ret = ret && (length == _length); | |
269 | |
270 if (!ret) { | |
271 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); | |
272 gclog_or_tty->print_cr("### list has %d entries, _length is %d", | |
273 length, _length); | |
274 } | |
275 | |
276 bool scan_only_ret = true; | |
277 length = 0; | |
278 curr = _scan_only_head; | |
279 last = NULL; | |
280 while (curr != NULL) { | |
281 if (!curr->is_young() || curr->is_scan_only()) { | |
282 gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" " | |
283 "incorrectly tagged (%d, %d)", | |
284 curr->bottom(), curr->end(), | |
285 curr->is_young(), curr->is_scan_only()); | |
286 scan_only_ret = false; | |
287 } | |
288 ++length; | |
289 last = curr; | |
290 curr = curr->get_next_young_region(); | |
291 } | |
292 scan_only_ret = scan_only_ret && (length == _scan_only_length); | |
293 | |
294 if ( (last != _scan_only_tail) || | |
295 (_scan_only_head == NULL && _scan_only_tail != NULL) || | |
296 (_scan_only_head != NULL && _scan_only_tail == NULL) ) { | |
297 gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly"); | |
298 scan_only_ret = false; | |
299 } | |
300 | |
301 if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) { | |
302 gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly"); | |
303 scan_only_ret = false; | |
304 } | |
305 | |
306 if (!scan_only_ret) { | |
307 gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!"); | |
308 gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d", | |
309 length, _scan_only_length); | |
310 } | |
311 | |
312 return ret && scan_only_ret; | |
313 } | |
314 | |
315 bool YoungList::check_list_empty(bool ignore_scan_only_list, | |
316 bool check_sample) { | |
317 bool ret = true; | |
318 | |
319 if (_length != 0) { | |
320 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", | |
321 _length); | |
322 ret = false; | |
323 } | |
324 if (check_sample && _last_sampled_rs_lengths != 0) { | |
325 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); | |
326 ret = false; | |
327 } | |
328 if (_head != NULL) { | |
329 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); | |
330 ret = false; | |
331 } | |
332 if (!ret) { | |
333 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); | |
334 } | |
335 | |
336 if (ignore_scan_only_list) | |
337 return ret; | |
338 | |
339 bool scan_only_ret = true; | |
340 if (_scan_only_length != 0) { | |
341 gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d", | |
342 _scan_only_length); | |
343 scan_only_ret = false; | |
344 } | |
345 if (_scan_only_head != NULL) { | |
346 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head"); | |
347 scan_only_ret = false; | |
348 } | |
349 if (_scan_only_tail != NULL) { | |
350 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail"); | |
351 scan_only_ret = false; | |
352 } | |
353 if (!scan_only_ret) { | |
354 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty"); | |
355 } | |
356 | |
357 return ret && scan_only_ret; | |
358 } | |
359 | |
360 void | |
361 YoungList::rs_length_sampling_init() { | |
362 _sampled_rs_lengths = 0; | |
363 _curr = _head; | |
364 } | |
365 | |
366 bool | |
367 YoungList::rs_length_sampling_more() { | |
368 return _curr != NULL; | |
369 } | |
370 | |
371 void | |
372 YoungList::rs_length_sampling_next() { | |
373 assert( _curr != NULL, "invariant" ); | |
374 _sampled_rs_lengths += _curr->rem_set()->occupied(); | |
375 _curr = _curr->get_next_young_region(); | |
376 if (_curr == NULL) { | |
377 _last_sampled_rs_lengths = _sampled_rs_lengths; | |
378 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); | |
379 } | |
380 } | |
381 | |
382 void | |
383 YoungList::reset_auxilary_lists() { | |
384 // We could have just "moved" the scan-only list to the young list. | |
385 // However, the scan-only list is ordered according to the region | |
386 // age in descending order, so, by moving one entry at a time, we | |
387 // ensure that it is recreated in ascending order. | |
388 | |
389 guarantee( is_empty(), "young list should be empty" ); | |
390 assert(check_list_well_formed(), "young list should be well formed"); | |
391 | |
392 // Add survivor regions to SurvRateGroup. | |
393 _g1h->g1_policy()->note_start_adding_survivor_regions(); | |
394 for (HeapRegion* curr = _survivor_head; | |
395 curr != NULL; | |
396 curr = curr->get_next_young_region()) { | |
397 _g1h->g1_policy()->set_region_survivors(curr); | |
398 } | |
399 _g1h->g1_policy()->note_stop_adding_survivor_regions(); | |
400 | |
401 if (_survivor_head != NULL) { | |
402 _head = _survivor_head; | |
403 _length = _survivor_length + _scan_only_length; | |
404 _survivors_tail->set_next_young_region(_scan_only_head); | |
405 } else { | |
406 _head = _scan_only_head; | |
407 _length = _scan_only_length; | |
408 } | |
409 | |
410 for (HeapRegion* curr = _scan_only_head; | |
411 curr != NULL; | |
412 curr = curr->get_next_young_region()) { | |
413 curr->recalculate_age_in_surv_rate_group(); | |
414 } | |
415 _scan_only_head = NULL; | |
416 _scan_only_tail = NULL; | |
417 _scan_only_length = 0; | |
418 _curr_scan_only = NULL; | |
419 | |
420 _survivor_head = NULL; | |
421 _survivors_tail = NULL; | |
422 _survivor_length = 0; | |
423 _g1h->g1_policy()->finished_recalculating_age_indexes(); | |
424 | |
425 assert(check_list_well_formed(), "young list should be well formed"); | |
426 } | |
427 | |
428 void YoungList::print() { | |
429 HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head}; | |
430 const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"}; | |
431 | |
432 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { | |
433 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); | |
434 HeapRegion *curr = lists[list]; | |
435 if (curr == NULL) | |
436 gclog_or_tty->print_cr(" empty"); | |
437 while (curr != NULL) { | |
438 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " | |
439 "age: %4d, y: %d, s-o: %d, surv: %d", | |
440 curr->bottom(), curr->end(), | |
441 curr->top(), | |
442 curr->prev_top_at_mark_start(), | |
443 curr->next_top_at_mark_start(), | |
444 curr->top_at_conc_mark_count(), | |
445 curr->age_in_surv_rate_group_cond(), | |
446 curr->is_young(), | |
447 curr->is_scan_only(), | |
448 curr->is_survivor()); | |
449 curr = curr->get_next_young_region(); | |
450 } | |
451 } | |
452 | |
453 gclog_or_tty->print_cr(""); | |
454 } | |
455 | |
456 void G1CollectedHeap::stop_conc_gc_threads() { | |
457 _cg1r->cg1rThread()->stop(); | |
458 _czft->stop(); | |
459 _cmThread->stop(); | |
460 } | |
461 | |
462 | |
463 void G1CollectedHeap::check_ct_logs_at_safepoint() { | |
464 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
465 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); | |
466 | |
467 // Count the dirty cards at the start. | |
468 CountNonCleanMemRegionClosure count1(this); | |
469 ct_bs->mod_card_iterate(&count1); | |
470 int orig_count = count1.n(); | |
471 | |
472 // First clear the logged cards. | |
473 ClearLoggedCardTableEntryClosure clear; | |
474 dcqs.set_closure(&clear); | |
475 dcqs.apply_closure_to_all_completed_buffers(); | |
476 dcqs.iterate_closure_all_threads(false); | |
477 clear.print_histo(); | |
478 | |
479 // Now ensure that there's no dirty cards. | |
480 CountNonCleanMemRegionClosure count2(this); | |
481 ct_bs->mod_card_iterate(&count2); | |
482 if (count2.n() != 0) { | |
483 gclog_or_tty->print_cr("Card table has %d entries; %d originally", | |
484 count2.n(), orig_count); | |
485 } | |
486 guarantee(count2.n() == 0, "Card table should be clean."); | |
487 | |
488 RedirtyLoggedCardTableEntryClosure redirty; | |
489 JavaThread::dirty_card_queue_set().set_closure(&redirty); | |
490 dcqs.apply_closure_to_all_completed_buffers(); | |
491 dcqs.iterate_closure_all_threads(false); | |
492 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", | |
493 clear.calls(), orig_count); | |
494 guarantee(redirty.calls() == clear.calls(), | |
495 "Or else mechanism is broken."); | |
496 | |
497 CountNonCleanMemRegionClosure count3(this); | |
498 ct_bs->mod_card_iterate(&count3); | |
499 if (count3.n() != orig_count) { | |
500 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", | |
501 orig_count, count3.n()); | |
502 guarantee(count3.n() >= orig_count, "Should have restored them all."); | |
503 } | |
504 | |
505 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
506 } | |
507 | |
508 // Private class members. | |
509 | |
510 G1CollectedHeap* G1CollectedHeap::_g1h; | |
511 | |
512 // Private methods. | |
513 | |
514 // Finds a HeapRegion that can be used to allocate a given size of block. | |
515 | |
516 | |
517 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, | |
518 bool do_expand, | |
519 bool zero_filled) { | |
520 ConcurrentZFThread::note_region_alloc(); | |
521 HeapRegion* res = alloc_free_region_from_lists(zero_filled); | |
522 if (res == NULL && do_expand) { | |
523 expand(word_size * HeapWordSize); | |
524 res = alloc_free_region_from_lists(zero_filled); | |
525 assert(res == NULL || | |
526 (!res->isHumongous() && | |
527 (!zero_filled || | |
528 res->zero_fill_state() == HeapRegion::Allocated)), | |
529 "Alloc Regions must be zero filled (and non-H)"); | |
530 } | |
531 if (res != NULL && res->is_empty()) _free_regions--; | |
532 assert(res == NULL || | |
533 (!res->isHumongous() && | |
534 (!zero_filled || | |
535 res->zero_fill_state() == HeapRegion::Allocated)), | |
536 "Non-young alloc Regions must be zero filled (and non-H)"); | |
537 | |
538 if (G1TraceRegions) { | |
539 if (res != NULL) { | |
540 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " | |
541 "top "PTR_FORMAT, | |
542 res->hrs_index(), res->bottom(), res->end(), res->top()); | |
543 } | |
544 } | |
545 | |
546 return res; | |
547 } | |
548 | |
549 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, | |
550 size_t word_size, | |
551 bool zero_filled) { | |
552 HeapRegion* alloc_region = NULL; | |
553 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { | |
554 alloc_region = newAllocRegion_work(word_size, true, zero_filled); | |
555 if (purpose == GCAllocForSurvived && alloc_region != NULL) { | |
556 _young_list->add_survivor_region(alloc_region); | |
557 } | |
558 ++_gc_alloc_region_counts[purpose]; | |
559 } else { | |
560 g1_policy()->note_alloc_region_limit_reached(purpose); | |
561 } | |
562 return alloc_region; | |
563 } | |
564 | |
565 // If could fit into free regions w/o expansion, try. | |
566 // Otherwise, if can expand, do so. | |
567 // Otherwise, if using ex regions might help, try with ex given back. | |
568 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { | |
569 assert(regions_accounted_for(), "Region leakage!"); | |
570 | |
571 // We can't allocate H regions while cleanupComplete is running, since | |
572 // some of the regions we find to be empty might not yet be added to the | |
573 // unclean list. (If we're already at a safepoint, this call is | |
574 // unnecessary, not to mention wrong.) | |
575 if (!SafepointSynchronize::is_at_safepoint()) | |
576 wait_for_cleanup_complete(); | |
577 | |
578 size_t num_regions = | |
579 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; | |
580 | |
581 // Special case if < one region??? | |
582 | |
583 // Remember the ft size. | |
584 size_t x_size = expansion_regions(); | |
585 | |
586 HeapWord* res = NULL; | |
587 bool eliminated_allocated_from_lists = false; | |
588 | |
589 // Can the allocation potentially fit in the free regions? | |
590 if (free_regions() >= num_regions) { | |
591 res = _hrs->obj_allocate(word_size); | |
592 } | |
593 if (res == NULL) { | |
594 // Try expansion. | |
595 size_t fs = _hrs->free_suffix(); | |
596 if (fs + x_size >= num_regions) { | |
597 expand((num_regions - fs) * HeapRegion::GrainBytes); | |
598 res = _hrs->obj_allocate(word_size); | |
599 assert(res != NULL, "This should have worked."); | |
600 } else { | |
601 // Expansion won't help. Are there enough free regions if we get rid | |
602 // of reservations? | |
603 size_t avail = free_regions(); | |
604 if (avail >= num_regions) { | |
605 res = _hrs->obj_allocate(word_size); | |
606 if (res != NULL) { | |
607 remove_allocated_regions_from_lists(); | |
608 eliminated_allocated_from_lists = true; | |
609 } | |
610 } | |
611 } | |
612 } | |
613 if (res != NULL) { | |
614 // Increment by the number of regions allocated. | |
615 // FIXME: Assumes regions all of size GrainBytes. | |
616 #ifndef PRODUCT | |
617 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * | |
618 HeapRegion::GrainWords)); | |
619 #endif | |
620 if (!eliminated_allocated_from_lists) | |
621 remove_allocated_regions_from_lists(); | |
622 _summary_bytes_used += word_size * HeapWordSize; | |
623 _free_regions -= num_regions; | |
624 _num_humongous_regions += (int) num_regions; | |
625 } | |
626 assert(regions_accounted_for(), "Region Leakage"); | |
627 return res; | |
628 } | |
629 | |
630 HeapWord* | |
631 G1CollectedHeap::attempt_allocation_slow(size_t word_size, | |
632 bool permit_collection_pause) { | |
633 HeapWord* res = NULL; | |
634 HeapRegion* allocated_young_region = NULL; | |
635 | |
636 assert( SafepointSynchronize::is_at_safepoint() || | |
637 Heap_lock->owned_by_self(), "pre condition of the call" ); | |
638 | |
639 if (isHumongous(word_size)) { | |
640 // Allocation of a humongous object can, in a sense, complete a | |
641 // partial region, if the previous alloc was also humongous, and | |
642 // caused the test below to succeed. | |
643 if (permit_collection_pause) | |
644 do_collection_pause_if_appropriate(word_size); | |
645 res = humongousObjAllocate(word_size); | |
646 assert(_cur_alloc_region == NULL | |
647 || !_cur_alloc_region->isHumongous(), | |
648 "Prevent a regression of this bug."); | |
649 | |
650 } else { | |
354
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
651 // We may have concurrent cleanup working at the time. Wait for it |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
652 // to complete. In the future we would probably want to make the |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
653 // concurrent cleanup truly concurrent by decoupling it from the |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
654 // allocation. |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
655 if (!SafepointSynchronize::is_at_safepoint()) |
c0f8f7790199
6652160: G1: assert(cur_used_bytes == _g1->recalculate_used(),"It should!") at g1CollectorPolicy.cpp:1425
iveresov
parents:
353
diff
changeset
|
656 wait_for_cleanup_complete(); |
342 | 657 // If we do a collection pause, this will be reset to a non-NULL |
658 // value. If we don't, nulling here ensures that we allocate a new | |
659 // region below. | |
660 if (_cur_alloc_region != NULL) { | |
661 // We're finished with the _cur_alloc_region. | |
662 _summary_bytes_used += _cur_alloc_region->used(); | |
663 _cur_alloc_region = NULL; | |
664 } | |
665 assert(_cur_alloc_region == NULL, "Invariant."); | |
666 // Completion of a heap region is perhaps a good point at which to do | |
667 // a collection pause. | |
668 if (permit_collection_pause) | |
669 do_collection_pause_if_appropriate(word_size); | |
670 // Make sure we have an allocation region available. | |
671 if (_cur_alloc_region == NULL) { | |
672 if (!SafepointSynchronize::is_at_safepoint()) | |
673 wait_for_cleanup_complete(); | |
674 bool next_is_young = should_set_young_locked(); | |
675 // If the next region is not young, make sure it's zero-filled. | |
676 _cur_alloc_region = newAllocRegion(word_size, !next_is_young); | |
677 if (_cur_alloc_region != NULL) { | |
678 _summary_bytes_used -= _cur_alloc_region->used(); | |
679 if (next_is_young) { | |
680 set_region_short_lived_locked(_cur_alloc_region); | |
681 allocated_young_region = _cur_alloc_region; | |
682 } | |
683 } | |
684 } | |
685 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), | |
686 "Prevent a regression of this bug."); | |
687 | |
688 // Now retry the allocation. | |
689 if (_cur_alloc_region != NULL) { | |
690 res = _cur_alloc_region->allocate(word_size); | |
691 } | |
692 } | |
693 | |
694 // NOTE: fails frequently in PRT | |
695 assert(regions_accounted_for(), "Region leakage!"); | |
696 | |
697 if (res != NULL) { | |
698 if (!SafepointSynchronize::is_at_safepoint()) { | |
699 assert( permit_collection_pause, "invariant" ); | |
700 assert( Heap_lock->owned_by_self(), "invariant" ); | |
701 Heap_lock->unlock(); | |
702 } | |
703 | |
704 if (allocated_young_region != NULL) { | |
705 HeapRegion* hr = allocated_young_region; | |
706 HeapWord* bottom = hr->bottom(); | |
707 HeapWord* end = hr->end(); | |
708 MemRegion mr(bottom, end); | |
709 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); | |
710 } | |
711 } | |
712 | |
713 assert( SafepointSynchronize::is_at_safepoint() || | |
714 (res == NULL && Heap_lock->owned_by_self()) || | |
715 (res != NULL && !Heap_lock->owned_by_self()), | |
716 "post condition of the call" ); | |
717 | |
718 return res; | |
719 } | |
720 | |
721 HeapWord* | |
722 G1CollectedHeap::mem_allocate(size_t word_size, | |
723 bool is_noref, | |
724 bool is_tlab, | |
725 bool* gc_overhead_limit_was_exceeded) { | |
726 debug_only(check_for_valid_allocation_state()); | |
727 assert(no_gc_in_progress(), "Allocation during gc not allowed"); | |
728 HeapWord* result = NULL; | |
729 | |
730 // Loop until the allocation is satisified, | |
731 // or unsatisfied after GC. | |
732 for (int try_count = 1; /* return or throw */; try_count += 1) { | |
733 int gc_count_before; | |
734 { | |
735 Heap_lock->lock(); | |
736 result = attempt_allocation(word_size); | |
737 if (result != NULL) { | |
738 // attempt_allocation should have unlocked the heap lock | |
739 assert(is_in(result), "result not in heap"); | |
740 return result; | |
741 } | |
742 // Read the gc count while the heap lock is held. | |
743 gc_count_before = SharedHeap::heap()->total_collections(); | |
744 Heap_lock->unlock(); | |
745 } | |
746 | |
747 // Create the garbage collection operation... | |
748 VM_G1CollectForAllocation op(word_size, | |
749 gc_count_before); | |
750 | |
751 // ...and get the VM thread to execute it. | |
752 VMThread::execute(&op); | |
753 if (op.prologue_succeeded()) { | |
754 result = op.result(); | |
755 assert(result == NULL || is_in(result), "result not in heap"); | |
756 return result; | |
757 } | |
758 | |
759 // Give a warning if we seem to be looping forever. | |
760 if ((QueuedAllocationWarningCount > 0) && | |
761 (try_count % QueuedAllocationWarningCount == 0)) { | |
762 warning("G1CollectedHeap::mem_allocate_work retries %d times", | |
763 try_count); | |
764 } | |
765 } | |
766 } | |
767 | |
768 void G1CollectedHeap::abandon_cur_alloc_region() { | |
769 if (_cur_alloc_region != NULL) { | |
770 // We're finished with the _cur_alloc_region. | |
771 if (_cur_alloc_region->is_empty()) { | |
772 _free_regions++; | |
773 free_region(_cur_alloc_region); | |
774 } else { | |
775 _summary_bytes_used += _cur_alloc_region->used(); | |
776 } | |
777 _cur_alloc_region = NULL; | |
778 } | |
779 } | |
780 | |
781 class PostMCRemSetClearClosure: public HeapRegionClosure { | |
782 ModRefBarrierSet* _mr_bs; | |
783 public: | |
784 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
785 bool doHeapRegion(HeapRegion* r) { | |
786 r->reset_gc_time_stamp(); | |
787 if (r->continuesHumongous()) | |
788 return false; | |
789 HeapRegionRemSet* hrrs = r->rem_set(); | |
790 if (hrrs != NULL) hrrs->clear(); | |
791 // You might think here that we could clear just the cards | |
792 // corresponding to the used region. But no: if we leave a dirty card | |
793 // in a region we might allocate into, then it would prevent that card | |
794 // from being enqueued, and cause it to be missed. | |
795 // Re: the performance cost: we shouldn't be doing full GC anyway! | |
796 _mr_bs->clear(MemRegion(r->bottom(), r->end())); | |
797 return false; | |
798 } | |
799 }; | |
800 | |
801 | |
802 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { | |
803 ModRefBarrierSet* _mr_bs; | |
804 public: | |
805 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} | |
806 bool doHeapRegion(HeapRegion* r) { | |
807 if (r->continuesHumongous()) return false; | |
808 if (r->used_region().word_size() != 0) { | |
809 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); | |
810 } | |
811 return false; | |
812 } | |
813 }; | |
814 | |
815 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs, | |
816 size_t word_size) { | |
817 ResourceMark rm; | |
818 | |
819 if (full && DisableExplicitGC) { | |
820 gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n"); | |
821 return; | |
822 } | |
823 | |
824 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
825 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
826 | |
827 if (GC_locker::is_active()) { | |
828 return; // GC is disabled (e.g. JNI GetXXXCritical operation) | |
829 } | |
830 | |
831 { | |
832 IsGCActiveMark x; | |
833 | |
834 // Timing | |
835 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
836 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
837 TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); | |
838 | |
839 double start = os::elapsedTime(); | |
840 GCOverheadReporter::recordSTWStart(start); | |
841 g1_policy()->record_full_collection_start(); | |
842 | |
843 gc_prologue(true); | |
844 increment_total_collections(); | |
845 | |
846 size_t g1h_prev_used = used(); | |
847 assert(used() == recalculate_used(), "Should be equal"); | |
848 | |
849 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
850 HandleMark hm; // Discard invalid handles created during verification | |
851 prepare_for_verify(); | |
852 gclog_or_tty->print(" VerifyBeforeGC:"); | |
853 Universe::verify(true); | |
854 } | |
855 assert(regions_accounted_for(), "Region leakage!"); | |
856 | |
857 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
858 | |
859 // We want to discover references, but not process them yet. | |
860 // This mode is disabled in | |
861 // instanceRefKlass::process_discovered_references if the | |
862 // generation does some collection work, or | |
863 // instanceRefKlass::enqueue_discovered_references if the | |
864 // generation returns without doing any work. | |
865 ref_processor()->disable_discovery(); | |
866 ref_processor()->abandon_partial_discovery(); | |
867 ref_processor()->verify_no_references_recorded(); | |
868 | |
869 // Abandon current iterations of concurrent marking and concurrent | |
870 // refinement, if any are in progress. | |
871 concurrent_mark()->abort(); | |
872 | |
873 // Make sure we'll choose a new allocation region afterwards. | |
874 abandon_cur_alloc_region(); | |
875 assert(_cur_alloc_region == NULL, "Invariant."); | |
876 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); | |
877 tear_down_region_lists(); | |
878 set_used_regions_to_need_zero_fill(); | |
879 if (g1_policy()->in_young_gc_mode()) { | |
880 empty_young_list(); | |
881 g1_policy()->set_full_young_gcs(true); | |
882 } | |
883 | |
884 // Temporarily make reference _discovery_ single threaded (non-MT). | |
885 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); | |
886 | |
887 // Temporarily make refs discovery atomic | |
888 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); | |
889 | |
890 // Temporarily clear _is_alive_non_header | |
891 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); | |
892 | |
893 ref_processor()->enable_discovery(); | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
393
diff
changeset
|
894 ref_processor()->snap_policy(clear_all_soft_refs); |
342 | 895 |
896 // Do collection work | |
897 { | |
898 HandleMark hm; // Discard invalid handles created during gc | |
899 G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); | |
900 } | |
901 // Because freeing humongous regions may have added some unclean | |
902 // regions, it is necessary to tear down again before rebuilding. | |
903 tear_down_region_lists(); | |
904 rebuild_region_lists(); | |
905 | |
906 _summary_bytes_used = recalculate_used(); | |
907 | |
908 ref_processor()->enqueue_discovered_references(); | |
909 | |
910 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
911 | |
912 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { | |
913 HandleMark hm; // Discard invalid handles created during verification | |
914 gclog_or_tty->print(" VerifyAfterGC:"); | |
915 Universe::verify(false); | |
916 } | |
917 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); | |
918 | |
919 reset_gc_time_stamp(); | |
920 // Since everything potentially moved, we will clear all remembered | |
921 // sets, and clear all cards. Later we will also cards in the used | |
922 // portion of the heap after the resizing (which could be a shrinking.) | |
923 // We will also reset the GC time stamps of the regions. | |
924 PostMCRemSetClearClosure rs_clear(mr_bs()); | |
925 heap_region_iterate(&rs_clear); | |
926 | |
927 // Resize the heap if necessary. | |
928 resize_if_necessary_after_full_collection(full ? 0 : word_size); | |
929 | |
930 // Since everything potentially moved, we will clear all remembered | |
931 // sets, but also dirty all cards corresponding to used regions. | |
932 PostMCRemSetInvalidateClosure rs_invalidate(mr_bs()); | |
933 heap_region_iterate(&rs_invalidate); | |
934 if (_cg1r->use_cache()) { | |
935 _cg1r->clear_and_record_card_counts(); | |
936 _cg1r->clear_hot_cache(); | |
937 } | |
938 | |
939 if (PrintGC) { | |
940 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); | |
941 } | |
942 | |
943 if (true) { // FIXME | |
944 // Ask the permanent generation to adjust size for full collections | |
945 perm()->compute_new_size(); | |
946 } | |
947 | |
948 double end = os::elapsedTime(); | |
949 GCOverheadReporter::recordSTWEnd(end); | |
950 g1_policy()->record_full_collection_end(); | |
951 | |
952 gc_epilogue(true); | |
953 | |
954 // Abandon concurrent refinement. This must happen last: in the | |
955 // dirty-card logging system, some cards may be dirty by weak-ref | |
956 // processing, and may be enqueued. But the whole card table is | |
957 // dirtied, so this should abandon those logs, and set "do_traversal" | |
958 // to true. | |
959 concurrent_g1_refine()->set_pya_restart(); | |
960 | |
961 assert(regions_accounted_for(), "Region leakage!"); | |
962 } | |
963 | |
964 if (g1_policy()->in_young_gc_mode()) { | |
965 _young_list->reset_sampled_info(); | |
966 assert( check_young_list_empty(false, false), | |
967 "young list should be empty at this point"); | |
968 } | |
969 } | |
970 | |
971 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { | |
972 do_collection(true, clear_all_soft_refs, 0); | |
973 } | |
974 | |
975 // This code is mostly copied from TenuredGeneration. | |
976 void | |
977 G1CollectedHeap:: | |
978 resize_if_necessary_after_full_collection(size_t word_size) { | |
979 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); | |
980 | |
981 // Include the current allocation, if any, and bytes that will be | |
982 // pre-allocated to support collections, as "used". | |
983 const size_t used_after_gc = used(); | |
984 const size_t capacity_after_gc = capacity(); | |
985 const size_t free_after_gc = capacity_after_gc - used_after_gc; | |
986 | |
987 // We don't have floating point command-line arguments | |
988 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100; | |
989 const double maximum_used_percentage = 1.0 - minimum_free_percentage; | |
990 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; | |
991 const double minimum_used_percentage = 1.0 - maximum_free_percentage; | |
992 | |
993 size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage); | |
994 size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage); | |
995 | |
996 // Don't shrink less than the initial size. | |
997 minimum_desired_capacity = | |
998 MAX2(minimum_desired_capacity, | |
999 collector_policy()->initial_heap_byte_size()); | |
1000 maximum_desired_capacity = | |
1001 MAX2(maximum_desired_capacity, | |
1002 collector_policy()->initial_heap_byte_size()); | |
1003 | |
1004 // We are failing here because minimum_desired_capacity is | |
1005 assert(used_after_gc <= minimum_desired_capacity, "sanity check"); | |
1006 assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check"); | |
1007 | |
1008 if (PrintGC && Verbose) { | |
1009 const double free_percentage = ((double)free_after_gc) / capacity(); | |
1010 gclog_or_tty->print_cr("Computing new size after full GC "); | |
1011 gclog_or_tty->print_cr(" " | |
1012 " minimum_free_percentage: %6.2f", | |
1013 minimum_free_percentage); | |
1014 gclog_or_tty->print_cr(" " | |
1015 " maximum_free_percentage: %6.2f", | |
1016 maximum_free_percentage); | |
1017 gclog_or_tty->print_cr(" " | |
1018 " capacity: %6.1fK" | |
1019 " minimum_desired_capacity: %6.1fK" | |
1020 " maximum_desired_capacity: %6.1fK", | |
1021 capacity() / (double) K, | |
1022 minimum_desired_capacity / (double) K, | |
1023 maximum_desired_capacity / (double) K); | |
1024 gclog_or_tty->print_cr(" " | |
1025 " free_after_gc : %6.1fK" | |
1026 " used_after_gc : %6.1fK", | |
1027 free_after_gc / (double) K, | |
1028 used_after_gc / (double) K); | |
1029 gclog_or_tty->print_cr(" " | |
1030 " free_percentage: %6.2f", | |
1031 free_percentage); | |
1032 } | |
1033 if (capacity() < minimum_desired_capacity) { | |
1034 // Don't expand unless it's significant | |
1035 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; | |
1036 expand(expand_bytes); | |
1037 if (PrintGC && Verbose) { | |
1038 gclog_or_tty->print_cr(" expanding:" | |
1039 " minimum_desired_capacity: %6.1fK" | |
1040 " expand_bytes: %6.1fK", | |
1041 minimum_desired_capacity / (double) K, | |
1042 expand_bytes / (double) K); | |
1043 } | |
1044 | |
1045 // No expansion, now see if we want to shrink | |
1046 } else if (capacity() > maximum_desired_capacity) { | |
1047 // Capacity too large, compute shrinking size | |
1048 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; | |
1049 shrink(shrink_bytes); | |
1050 if (PrintGC && Verbose) { | |
1051 gclog_or_tty->print_cr(" " | |
1052 " shrinking:" | |
1053 " initSize: %.1fK" | |
1054 " maximum_desired_capacity: %.1fK", | |
1055 collector_policy()->initial_heap_byte_size() / (double) K, | |
1056 maximum_desired_capacity / (double) K); | |
1057 gclog_or_tty->print_cr(" " | |
1058 " shrink_bytes: %.1fK", | |
1059 shrink_bytes / (double) K); | |
1060 } | |
1061 } | |
1062 } | |
1063 | |
1064 | |
1065 HeapWord* | |
1066 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { | |
1067 HeapWord* result = NULL; | |
1068 | |
1069 // In a G1 heap, we're supposed to keep allocation from failing by | |
1070 // incremental pauses. Therefore, at least for now, we'll favor | |
1071 // expansion over collection. (This might change in the future if we can | |
1072 // do something smarter than full collection to satisfy a failed alloc.) | |
1073 | |
1074 result = expand_and_allocate(word_size); | |
1075 if (result != NULL) { | |
1076 assert(is_in(result), "result not in heap"); | |
1077 return result; | |
1078 } | |
1079 | |
1080 // OK, I guess we have to try collection. | |
1081 | |
1082 do_collection(false, false, word_size); | |
1083 | |
1084 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1085 | |
1086 if (result != NULL) { | |
1087 assert(is_in(result), "result not in heap"); | |
1088 return result; | |
1089 } | |
1090 | |
1091 // Try collecting soft references. | |
1092 do_collection(false, true, word_size); | |
1093 result = attempt_allocation(word_size, /*permit_collection_pause*/false); | |
1094 if (result != NULL) { | |
1095 assert(is_in(result), "result not in heap"); | |
1096 return result; | |
1097 } | |
1098 | |
1099 // What else? We might try synchronous finalization later. If the total | |
1100 // space available is large enough for the allocation, then a more | |
1101 // complete compaction phase than we've tried so far might be | |
1102 // appropriate. | |
1103 return NULL; | |
1104 } | |
1105 | |
1106 // Attempting to expand the heap sufficiently | |
1107 // to support an allocation of the given "word_size". If | |
1108 // successful, perform the allocation and return the address of the | |
1109 // allocated block, or else "NULL". | |
1110 | |
1111 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { | |
1112 size_t expand_bytes = word_size * HeapWordSize; | |
1113 if (expand_bytes < MinHeapDeltaBytes) { | |
1114 expand_bytes = MinHeapDeltaBytes; | |
1115 } | |
1116 expand(expand_bytes); | |
1117 assert(regions_accounted_for(), "Region leakage!"); | |
1118 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); | |
1119 return result; | |
1120 } | |
1121 | |
1122 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { | |
1123 size_t pre_used = 0; | |
1124 size_t cleared_h_regions = 0; | |
1125 size_t freed_regions = 0; | |
1126 UncleanRegionList local_list; | |
1127 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, | |
1128 freed_regions, &local_list); | |
1129 | |
1130 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
1131 &local_list); | |
1132 return pre_used; | |
1133 } | |
1134 | |
1135 void | |
1136 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, | |
1137 size_t& pre_used, | |
1138 size_t& cleared_h, | |
1139 size_t& freed_regions, | |
1140 UncleanRegionList* list, | |
1141 bool par) { | |
1142 assert(!hr->continuesHumongous(), "should have filtered these out"); | |
1143 size_t res = 0; | |
1144 if (!hr->popular() && hr->used() > 0 && hr->garbage_bytes() == hr->used()) { | |
1145 if (!hr->is_young()) { | |
1146 if (G1PolicyVerbose > 0) | |
1147 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" | |
1148 " during cleanup", hr, hr->used()); | |
1149 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); | |
1150 } | |
1151 } | |
1152 } | |
1153 | |
1154 // FIXME: both this and shrink could probably be more efficient by | |
1155 // doing one "VirtualSpace::expand_by" call rather than several. | |
1156 void G1CollectedHeap::expand(size_t expand_bytes) { | |
1157 size_t old_mem_size = _g1_storage.committed_size(); | |
1158 // We expand by a minimum of 1K. | |
1159 expand_bytes = MAX2(expand_bytes, (size_t)K); | |
1160 size_t aligned_expand_bytes = | |
1161 ReservedSpace::page_align_size_up(expand_bytes); | |
1162 aligned_expand_bytes = align_size_up(aligned_expand_bytes, | |
1163 HeapRegion::GrainBytes); | |
1164 expand_bytes = aligned_expand_bytes; | |
1165 while (expand_bytes > 0) { | |
1166 HeapWord* base = (HeapWord*)_g1_storage.high(); | |
1167 // Commit more storage. | |
1168 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); | |
1169 if (!successful) { | |
1170 expand_bytes = 0; | |
1171 } else { | |
1172 expand_bytes -= HeapRegion::GrainBytes; | |
1173 // Expand the committed region. | |
1174 HeapWord* high = (HeapWord*) _g1_storage.high(); | |
1175 _g1_committed.set_end(high); | |
1176 // Create a new HeapRegion. | |
1177 MemRegion mr(base, high); | |
1178 bool is_zeroed = !_g1_max_committed.contains(base); | |
1179 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); | |
1180 | |
1181 // Now update max_committed if necessary. | |
1182 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); | |
1183 | |
1184 // Add it to the HeapRegionSeq. | |
1185 _hrs->insert(hr); | |
1186 // Set the zero-fill state, according to whether it's already | |
1187 // zeroed. | |
1188 { | |
1189 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
1190 if (is_zeroed) { | |
1191 hr->set_zero_fill_complete(); | |
1192 put_free_region_on_list_locked(hr); | |
1193 } else { | |
1194 hr->set_zero_fill_needed(); | |
1195 put_region_on_unclean_list_locked(hr); | |
1196 } | |
1197 } | |
1198 _free_regions++; | |
1199 // And we used up an expansion region to create it. | |
1200 _expansion_regions--; | |
1201 // Tell the cardtable about it. | |
1202 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1203 // And the offset table as well. | |
1204 _bot_shared->resize(_g1_committed.word_size()); | |
1205 } | |
1206 } | |
1207 if (Verbose && PrintGC) { | |
1208 size_t new_mem_size = _g1_storage.committed_size(); | |
1209 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", | |
1210 old_mem_size/K, aligned_expand_bytes/K, | |
1211 new_mem_size/K); | |
1212 } | |
1213 } | |
1214 | |
1215 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) | |
1216 { | |
1217 size_t old_mem_size = _g1_storage.committed_size(); | |
1218 size_t aligned_shrink_bytes = | |
1219 ReservedSpace::page_align_size_down(shrink_bytes); | |
1220 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, | |
1221 HeapRegion::GrainBytes); | |
1222 size_t num_regions_deleted = 0; | |
1223 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); | |
1224 | |
1225 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1226 if (mr.byte_size() > 0) | |
1227 _g1_storage.shrink_by(mr.byte_size()); | |
1228 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); | |
1229 | |
1230 _g1_committed.set_end(mr.start()); | |
1231 _free_regions -= num_regions_deleted; | |
1232 _expansion_regions += num_regions_deleted; | |
1233 | |
1234 // Tell the cardtable about it. | |
1235 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); | |
1236 | |
1237 // And the offset table as well. | |
1238 _bot_shared->resize(_g1_committed.word_size()); | |
1239 | |
1240 HeapRegionRemSet::shrink_heap(n_regions()); | |
1241 | |
1242 if (Verbose && PrintGC) { | |
1243 size_t new_mem_size = _g1_storage.committed_size(); | |
1244 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", | |
1245 old_mem_size/K, aligned_shrink_bytes/K, | |
1246 new_mem_size/K); | |
1247 } | |
1248 } | |
1249 | |
1250 void G1CollectedHeap::shrink(size_t shrink_bytes) { | |
1251 release_gc_alloc_regions(); | |
1252 tear_down_region_lists(); // We will rebuild them in a moment. | |
1253 shrink_helper(shrink_bytes); | |
1254 rebuild_region_lists(); | |
1255 } | |
1256 | |
1257 // Public methods. | |
1258 | |
1259 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
1260 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
1261 #endif // _MSC_VER | |
1262 | |
1263 | |
1264 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : | |
1265 SharedHeap(policy_), | |
1266 _g1_policy(policy_), | |
1267 _ref_processor(NULL), | |
1268 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), | |
1269 _bot_shared(NULL), | |
1270 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), | |
1271 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), | |
1272 _evac_failure_scan_stack(NULL) , | |
1273 _mark_in_progress(false), | |
1274 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), | |
1275 _cur_alloc_region(NULL), | |
1276 _refine_cte_cl(NULL), | |
1277 _free_region_list(NULL), _free_region_list_size(0), | |
1278 _free_regions(0), | |
1279 _popular_object_boundary(NULL), | |
1280 _cur_pop_hr_index(0), | |
1281 _popular_regions_to_be_evacuated(NULL), | |
1282 _pop_obj_rc_at_copy(), | |
1283 _full_collection(false), | |
1284 _unclean_region_list(), | |
1285 _unclean_regions_coming(false), | |
1286 _young_list(new YoungList(this)), | |
1287 _gc_time_stamp(0), | |
1288 _surviving_young_words(NULL) | |
1289 { | |
1290 _g1h = this; // To catch bugs. | |
1291 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | |
1292 vm_exit_during_initialization("Failed necessary allocation."); | |
1293 } | |
1294 int n_queues = MAX2((int)ParallelGCThreads, 1); | |
1295 _task_queues = new RefToScanQueueSet(n_queues); | |
1296 | |
1297 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); | |
1298 assert(n_rem_sets > 0, "Invariant."); | |
1299 | |
1300 HeapRegionRemSetIterator** iter_arr = | |
1301 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); | |
1302 for (int i = 0; i < n_queues; i++) { | |
1303 iter_arr[i] = new HeapRegionRemSetIterator(); | |
1304 } | |
1305 _rem_set_iterator = iter_arr; | |
1306 | |
1307 for (int i = 0; i < n_queues; i++) { | |
1308 RefToScanQueue* q = new RefToScanQueue(); | |
1309 q->initialize(); | |
1310 _task_queues->register_queue(i, q); | |
1311 } | |
1312 | |
1313 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
1314 _gc_alloc_regions[ap] = NULL; | |
1315 _gc_alloc_region_counts[ap] = 0; | |
1316 } | |
1317 guarantee(_task_queues != NULL, "task_queues allocation failure."); | |
1318 } | |
1319 | |
1320 jint G1CollectedHeap::initialize() { | |
1321 os::enable_vtime(); | |
1322 | |
1323 // Necessary to satisfy locking discipline assertions. | |
1324 | |
1325 MutexLocker x(Heap_lock); | |
1326 | |
1327 // While there are no constraints in the GC code that HeapWordSize | |
1328 // be any particular value, there are multiple other areas in the | |
1329 // system which believe this to be true (e.g. oop->object_size in some | |
1330 // cases incorrectly returns the size in wordSize units rather than | |
1331 // HeapWordSize). | |
1332 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
1333 | |
1334 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); | |
1335 size_t max_byte_size = collector_policy()->max_heap_byte_size(); | |
1336 | |
1337 // Ensure that the sizes are properly aligned. | |
1338 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1339 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); | |
1340 | |
1341 // We allocate this in any case, but only do no work if the command line | |
1342 // param is off. | |
1343 _cg1r = new ConcurrentG1Refine(); | |
1344 | |
1345 // Reserve the maximum. | |
1346 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); | |
1347 // Includes the perm-gen. | |
1348 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), | |
1349 HeapRegion::GrainBytes, | |
1350 false /*ism*/); | |
1351 | |
1352 if (!heap_rs.is_reserved()) { | |
1353 vm_exit_during_initialization("Could not reserve enough space for object heap"); | |
1354 return JNI_ENOMEM; | |
1355 } | |
1356 | |
1357 // It is important to do this in a way such that concurrent readers can't | |
1358 // temporarily think somethings in the heap. (I've actually seen this | |
1359 // happen in asserts: DLD.) | |
1360 _reserved.set_word_size(0); | |
1361 _reserved.set_start((HeapWord*)heap_rs.base()); | |
1362 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | |
1363 | |
1364 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; | |
1365 | |
1366 _num_humongous_regions = 0; | |
1367 | |
1368 // Create the gen rem set (and barrier set) for the entire reserved region. | |
1369 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | |
1370 set_barrier_set(rem_set()->bs()); | |
1371 if (barrier_set()->is_a(BarrierSet::ModRef)) { | |
1372 _mr_bs = (ModRefBarrierSet*)_barrier_set; | |
1373 } else { | |
1374 vm_exit_during_initialization("G1 requires a mod ref bs."); | |
1375 return JNI_ENOMEM; | |
1376 } | |
1377 | |
1378 // Also create a G1 rem set. | |
1379 if (G1UseHRIntoRS) { | |
1380 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { | |
1381 _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); | |
1382 } else { | |
1383 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); | |
1384 return JNI_ENOMEM; | |
1385 } | |
1386 } else { | |
1387 _g1_rem_set = new StupidG1RemSet(this); | |
1388 } | |
1389 | |
1390 // Carve out the G1 part of the heap. | |
1391 | |
1392 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | |
1393 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), | |
1394 g1_rs.size()/HeapWordSize); | |
1395 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); | |
1396 | |
1397 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); | |
1398 | |
1399 _g1_storage.initialize(g1_rs, 0); | |
1400 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); | |
1401 _g1_max_committed = _g1_committed; | |
393 | 1402 _hrs = new HeapRegionSeq(_expansion_regions); |
342 | 1403 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
1404 guarantee(_cur_alloc_region == NULL, "from constructor"); | |
1405 | |
1406 _bot_shared = new G1BlockOffsetSharedArray(_reserved, | |
1407 heap_word_size(init_byte_size)); | |
1408 | |
1409 _g1h = this; | |
1410 | |
1411 // Create the ConcurrentMark data structure and thread. | |
1412 // (Must do this late, so that "max_regions" is defined.) | |
1413 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); | |
1414 _cmThread = _cm->cmThread(); | |
1415 | |
1416 // ...and the concurrent zero-fill thread, if necessary. | |
1417 if (G1ConcZeroFill) { | |
1418 _czft = new ConcurrentZFThread(); | |
1419 } | |
1420 | |
1421 | |
1422 | |
1423 // Allocate the popular regions; take them off free lists. | |
1424 size_t pop_byte_size = G1NumPopularRegions * HeapRegion::GrainBytes; | |
1425 expand(pop_byte_size); | |
1426 _popular_object_boundary = | |
1427 _g1_reserved.start() + (G1NumPopularRegions * HeapRegion::GrainWords); | |
1428 for (int i = 0; i < G1NumPopularRegions; i++) { | |
1429 HeapRegion* hr = newAllocRegion(HeapRegion::GrainWords); | |
1430 // assert(hr != NULL && hr->bottom() < _popular_object_boundary, | |
1431 // "Should be enough, and all should be below boundary."); | |
1432 hr->set_popular(true); | |
1433 } | |
1434 assert(_cur_pop_hr_index == 0, "Start allocating at the first region."); | |
1435 | |
1436 // Initialize the from_card cache structure of HeapRegionRemSet. | |
1437 HeapRegionRemSet::init_heap(max_regions()); | |
1438 | |
1439 // Now expand into the rest of the initial heap size. | |
1440 expand(init_byte_size - pop_byte_size); | |
1441 | |
1442 // Perform any initialization actions delegated to the policy. | |
1443 g1_policy()->init(); | |
1444 | |
1445 g1_policy()->note_start_of_mark_thread(); | |
1446 | |
1447 _refine_cte_cl = | |
1448 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), | |
1449 g1_rem_set(), | |
1450 concurrent_g1_refine()); | |
1451 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); | |
1452 | |
1453 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, | |
1454 SATB_Q_FL_lock, | |
1455 0, | |
1456 Shared_SATB_Q_lock); | |
1457 if (G1RSBarrierUseQueue) { | |
1458 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, | |
1459 DirtyCardQ_FL_lock, | |
1460 G1DirtyCardQueueMax, | |
1461 Shared_DirtyCardQ_lock); | |
1462 } | |
1463 // In case we're keeping closure specialization stats, initialize those | |
1464 // counts and that mechanism. | |
1465 SpecializationStats::clear(); | |
1466 | |
1467 _gc_alloc_region_list = NULL; | |
1468 | |
1469 // Do later initialization work for concurrent refinement. | |
1470 _cg1r->init(); | |
1471 | |
1472 const char* group_names[] = { "CR", "ZF", "CM", "CL" }; | |
1473 GCOverheadReporter::initGCOverheadReporter(4, group_names); | |
1474 | |
1475 return JNI_OK; | |
1476 } | |
1477 | |
1478 void G1CollectedHeap::ref_processing_init() { | |
1479 SharedHeap::ref_processing_init(); | |
1480 MemRegion mr = reserved_region(); | |
1481 _ref_processor = ReferenceProcessor::create_ref_processor( | |
1482 mr, // span | |
1483 false, // Reference discovery is not atomic | |
1484 // (though it shouldn't matter here.) | |
1485 true, // mt_discovery | |
1486 NULL, // is alive closure: need to fill this in for efficiency | |
1487 ParallelGCThreads, | |
1488 ParallelRefProcEnabled, | |
1489 true); // Setting next fields of discovered | |
1490 // lists requires a barrier. | |
1491 } | |
1492 | |
1493 size_t G1CollectedHeap::capacity() const { | |
1494 return _g1_committed.byte_size(); | |
1495 } | |
1496 | |
1497 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent, | |
1498 int worker_i) { | |
1499 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
1500 int n_completed_buffers = 0; | |
1501 while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) { | |
1502 n_completed_buffers++; | |
1503 } | |
1504 g1_policy()->record_update_rs_processed_buffers(worker_i, | |
1505 (double) n_completed_buffers); | |
1506 dcqs.clear_n_completed_buffers(); | |
1507 // Finish up the queue... | |
1508 if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i, | |
1509 g1_rem_set()); | |
1510 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); | |
1511 } | |
1512 | |
1513 | |
1514 // Computes the sum of the storage used by the various regions. | |
1515 | |
1516 size_t G1CollectedHeap::used() const { | |
1517 assert(Heap_lock->owner() != NULL, | |
1518 "Should be owned on this thread's behalf."); | |
1519 size_t result = _summary_bytes_used; | |
1520 if (_cur_alloc_region != NULL) | |
1521 result += _cur_alloc_region->used(); | |
1522 return result; | |
1523 } | |
1524 | |
1525 class SumUsedClosure: public HeapRegionClosure { | |
1526 size_t _used; | |
1527 public: | |
1528 SumUsedClosure() : _used(0) {} | |
1529 bool doHeapRegion(HeapRegion* r) { | |
1530 if (!r->continuesHumongous()) { | |
1531 _used += r->used(); | |
1532 } | |
1533 return false; | |
1534 } | |
1535 size_t result() { return _used; } | |
1536 }; | |
1537 | |
1538 size_t G1CollectedHeap::recalculate_used() const { | |
1539 SumUsedClosure blk; | |
1540 _hrs->iterate(&blk); | |
1541 return blk.result(); | |
1542 } | |
1543 | |
1544 #ifndef PRODUCT | |
1545 class SumUsedRegionsClosure: public HeapRegionClosure { | |
1546 size_t _num; | |
1547 public: | |
1548 // _num is set to 1 to account for the popular region | |
1549 SumUsedRegionsClosure() : _num(G1NumPopularRegions) {} | |
1550 bool doHeapRegion(HeapRegion* r) { | |
1551 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { | |
1552 _num += 1; | |
1553 } | |
1554 return false; | |
1555 } | |
1556 size_t result() { return _num; } | |
1557 }; | |
1558 | |
1559 size_t G1CollectedHeap::recalculate_used_regions() const { | |
1560 SumUsedRegionsClosure blk; | |
1561 _hrs->iterate(&blk); | |
1562 return blk.result(); | |
1563 } | |
1564 #endif // PRODUCT | |
1565 | |
1566 size_t G1CollectedHeap::unsafe_max_alloc() { | |
1567 if (_free_regions > 0) return HeapRegion::GrainBytes; | |
1568 // otherwise, is there space in the current allocation region? | |
1569 | |
1570 // We need to store the current allocation region in a local variable | |
1571 // here. The problem is that this method doesn't take any locks and | |
1572 // there may be other threads which overwrite the current allocation | |
1573 // region field. attempt_allocation(), for example, sets it to NULL | |
1574 // and this can happen *after* the NULL check here but before the call | |
1575 // to free(), resulting in a SIGSEGV. Note that this doesn't appear | |
1576 // to be a problem in the optimized build, since the two loads of the | |
1577 // current allocation region field are optimized away. | |
1578 HeapRegion* car = _cur_alloc_region; | |
1579 | |
1580 // FIXME: should iterate over all regions? | |
1581 if (car == NULL) { | |
1582 return 0; | |
1583 } | |
1584 return car->free(); | |
1585 } | |
1586 | |
1587 void G1CollectedHeap::collect(GCCause::Cause cause) { | |
1588 // The caller doesn't have the Heap_lock | |
1589 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); | |
1590 MutexLocker ml(Heap_lock); | |
1591 collect_locked(cause); | |
1592 } | |
1593 | |
1594 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { | |
1595 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
1596 assert(Heap_lock->is_locked(), "Precondition#2"); | |
1597 GCCauseSetter gcs(this, cause); | |
1598 switch (cause) { | |
1599 case GCCause::_heap_inspection: | |
1600 case GCCause::_heap_dump: { | |
1601 HandleMark hm; | |
1602 do_full_collection(false); // don't clear all soft refs | |
1603 break; | |
1604 } | |
1605 default: // XXX FIX ME | |
1606 ShouldNotReachHere(); // Unexpected use of this function | |
1607 } | |
1608 } | |
1609 | |
1610 | |
1611 void G1CollectedHeap::collect_locked(GCCause::Cause cause) { | |
1612 // Don't want to do a GC until cleanup is completed. | |
1613 wait_for_cleanup_complete(); | |
1614 | |
1615 // Read the GC count while holding the Heap_lock | |
1616 int gc_count_before = SharedHeap::heap()->total_collections(); | |
1617 { | |
1618 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
1619 VM_G1CollectFull op(gc_count_before, cause); | |
1620 VMThread::execute(&op); | |
1621 } | |
1622 } | |
1623 | |
1624 bool G1CollectedHeap::is_in(const void* p) const { | |
1625 if (_g1_committed.contains(p)) { | |
1626 HeapRegion* hr = _hrs->addr_to_region(p); | |
1627 return hr->is_in(p); | |
1628 } else { | |
1629 return _perm_gen->as_gen()->is_in(p); | |
1630 } | |
1631 } | |
1632 | |
1633 // Iteration functions. | |
1634 | |
1635 // Iterates an OopClosure over all ref-containing fields of objects | |
1636 // within a HeapRegion. | |
1637 | |
1638 class IterateOopClosureRegionClosure: public HeapRegionClosure { | |
1639 MemRegion _mr; | |
1640 OopClosure* _cl; | |
1641 public: | |
1642 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) | |
1643 : _mr(mr), _cl(cl) {} | |
1644 bool doHeapRegion(HeapRegion* r) { | |
1645 if (! r->continuesHumongous()) { | |
1646 r->oop_iterate(_cl); | |
1647 } | |
1648 return false; | |
1649 } | |
1650 }; | |
1651 | |
1652 void G1CollectedHeap::oop_iterate(OopClosure* cl) { | |
1653 IterateOopClosureRegionClosure blk(_g1_committed, cl); | |
1654 _hrs->iterate(&blk); | |
1655 } | |
1656 | |
1657 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) { | |
1658 IterateOopClosureRegionClosure blk(mr, cl); | |
1659 _hrs->iterate(&blk); | |
1660 } | |
1661 | |
1662 // Iterates an ObjectClosure over all objects within a HeapRegion. | |
1663 | |
1664 class IterateObjectClosureRegionClosure: public HeapRegionClosure { | |
1665 ObjectClosure* _cl; | |
1666 public: | |
1667 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} | |
1668 bool doHeapRegion(HeapRegion* r) { | |
1669 if (! r->continuesHumongous()) { | |
1670 r->object_iterate(_cl); | |
1671 } | |
1672 return false; | |
1673 } | |
1674 }; | |
1675 | |
1676 void G1CollectedHeap::object_iterate(ObjectClosure* cl) { | |
1677 IterateObjectClosureRegionClosure blk(cl); | |
1678 _hrs->iterate(&blk); | |
1679 } | |
1680 | |
1681 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
1682 // FIXME: is this right? | |
1683 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); | |
1684 } | |
1685 | |
1686 // Calls a SpaceClosure on a HeapRegion. | |
1687 | |
1688 class SpaceClosureRegionClosure: public HeapRegionClosure { | |
1689 SpaceClosure* _cl; | |
1690 public: | |
1691 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} | |
1692 bool doHeapRegion(HeapRegion* r) { | |
1693 _cl->do_space(r); | |
1694 return false; | |
1695 } | |
1696 }; | |
1697 | |
1698 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { | |
1699 SpaceClosureRegionClosure blk(cl); | |
1700 _hrs->iterate(&blk); | |
1701 } | |
1702 | |
1703 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { | |
1704 _hrs->iterate(cl); | |
1705 } | |
1706 | |
1707 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, | |
1708 HeapRegionClosure* cl) { | |
1709 _hrs->iterate_from(r, cl); | |
1710 } | |
1711 | |
1712 void | |
1713 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { | |
1714 _hrs->iterate_from(idx, cl); | |
1715 } | |
1716 | |
1717 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } | |
1718 | |
1719 void | |
1720 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | |
1721 int worker, | |
1722 jint claim_value) { | |
355 | 1723 const size_t regions = n_regions(); |
1724 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); | |
1725 // try to spread out the starting points of the workers | |
1726 const size_t start_index = regions / worker_num * (size_t) worker; | |
1727 | |
1728 // each worker will actually look at all regions | |
1729 for (size_t count = 0; count < regions; ++count) { | |
1730 const size_t index = (start_index + count) % regions; | |
1731 assert(0 <= index && index < regions, "sanity"); | |
1732 HeapRegion* r = region_at(index); | |
1733 // we'll ignore "continues humongous" regions (we'll process them | |
1734 // when we come across their corresponding "start humongous" | |
1735 // region) and regions already claimed | |
1736 if (r->claim_value() == claim_value || r->continuesHumongous()) { | |
1737 continue; | |
1738 } | |
1739 // OK, try to claim it | |
342 | 1740 if (r->claimHeapRegion(claim_value)) { |
355 | 1741 // success! |
1742 assert(!r->continuesHumongous(), "sanity"); | |
1743 if (r->startsHumongous()) { | |
1744 // If the region is "starts humongous" we'll iterate over its | |
1745 // "continues humongous" first; in fact we'll do them | |
1746 // first. The order is important. In on case, calling the | |
1747 // closure on the "starts humongous" region might de-allocate | |
1748 // and clear all its "continues humongous" regions and, as a | |
1749 // result, we might end up processing them twice. So, we'll do | |
1750 // them first (notice: most closures will ignore them anyway) and | |
1751 // then we'll do the "starts humongous" region. | |
1752 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
1753 HeapRegion* chr = region_at(ch_index); | |
1754 | |
1755 // if the region has already been claimed or it's not | |
1756 // "continues humongous" we're done | |
1757 if (chr->claim_value() == claim_value || | |
1758 !chr->continuesHumongous()) { | |
1759 break; | |
1760 } | |
1761 | |
1762 // Noone should have claimed it directly. We can given | |
1763 // that we claimed its "starts humongous" region. | |
1764 assert(chr->claim_value() != claim_value, "sanity"); | |
1765 assert(chr->humongous_start_region() == r, "sanity"); | |
1766 | |
1767 if (chr->claimHeapRegion(claim_value)) { | |
1768 // we should always be able to claim it; noone else should | |
1769 // be trying to claim this region | |
1770 | |
1771 bool res2 = cl->doHeapRegion(chr); | |
1772 assert(!res2, "Should not abort"); | |
1773 | |
1774 // Right now, this holds (i.e., no closure that actually | |
1775 // does something with "continues humongous" regions | |
1776 // clears them). We might have to weaken it in the future, | |
1777 // but let's leave these two asserts here for extra safety. | |
1778 assert(chr->continuesHumongous(), "should still be the case"); | |
1779 assert(chr->humongous_start_region() == r, "sanity"); | |
1780 } else { | |
1781 guarantee(false, "we should not reach here"); | |
1782 } | |
1783 } | |
1784 } | |
1785 | |
1786 assert(!r->continuesHumongous(), "sanity"); | |
1787 bool res = cl->doHeapRegion(r); | |
1788 assert(!res, "Should not abort"); | |
1789 } | |
1790 } | |
1791 } | |
1792 | |
390 | 1793 class ResetClaimValuesClosure: public HeapRegionClosure { |
1794 public: | |
1795 bool doHeapRegion(HeapRegion* r) { | |
1796 r->set_claim_value(HeapRegion::InitialClaimValue); | |
1797 return false; | |
1798 } | |
1799 }; | |
1800 | |
1801 void | |
1802 G1CollectedHeap::reset_heap_region_claim_values() { | |
1803 ResetClaimValuesClosure blk; | |
1804 heap_region_iterate(&blk); | |
1805 } | |
1806 | |
355 | 1807 #ifdef ASSERT |
1808 // This checks whether all regions in the heap have the correct claim | |
1809 // value. I also piggy-backed on this a check to ensure that the | |
1810 // humongous_start_region() information on "continues humongous" | |
1811 // regions is correct. | |
1812 | |
1813 class CheckClaimValuesClosure : public HeapRegionClosure { | |
1814 private: | |
1815 jint _claim_value; | |
1816 size_t _failures; | |
1817 HeapRegion* _sh_region; | |
1818 public: | |
1819 CheckClaimValuesClosure(jint claim_value) : | |
1820 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } | |
1821 bool doHeapRegion(HeapRegion* r) { | |
1822 if (r->claim_value() != _claim_value) { | |
1823 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
1824 "claim value = %d, should be %d", | |
1825 r->bottom(), r->end(), r->claim_value(), | |
1826 _claim_value); | |
1827 ++_failures; | |
1828 } | |
1829 if (!r->isHumongous()) { | |
1830 _sh_region = NULL; | |
1831 } else if (r->startsHumongous()) { | |
1832 _sh_region = r; | |
1833 } else if (r->continuesHumongous()) { | |
1834 if (r->humongous_start_region() != _sh_region) { | |
1835 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
1836 "HS = "PTR_FORMAT", should be "PTR_FORMAT, | |
1837 r->bottom(), r->end(), | |
1838 r->humongous_start_region(), | |
1839 _sh_region); | |
1840 ++_failures; | |
342 | 1841 } |
1842 } | |
355 | 1843 return false; |
1844 } | |
1845 size_t failures() { | |
1846 return _failures; | |
1847 } | |
1848 }; | |
1849 | |
1850 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
1851 CheckClaimValuesClosure cl(claim_value); | |
1852 heap_region_iterate(&cl); | |
1853 return cl.failures() == 0; | |
1854 } | |
1855 #endif // ASSERT | |
342 | 1856 |
1857 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | |
1858 HeapRegion* r = g1_policy()->collection_set(); | |
1859 while (r != NULL) { | |
1860 HeapRegion* next = r->next_in_collection_set(); | |
1861 if (cl->doHeapRegion(r)) { | |
1862 cl->incomplete(); | |
1863 return; | |
1864 } | |
1865 r = next; | |
1866 } | |
1867 } | |
1868 | |
1869 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, | |
1870 HeapRegionClosure *cl) { | |
1871 assert(r->in_collection_set(), | |
1872 "Start region must be a member of the collection set."); | |
1873 HeapRegion* cur = r; | |
1874 while (cur != NULL) { | |
1875 HeapRegion* next = cur->next_in_collection_set(); | |
1876 if (cl->doHeapRegion(cur) && false) { | |
1877 cl->incomplete(); | |
1878 return; | |
1879 } | |
1880 cur = next; | |
1881 } | |
1882 cur = g1_policy()->collection_set(); | |
1883 while (cur != r) { | |
1884 HeapRegion* next = cur->next_in_collection_set(); | |
1885 if (cl->doHeapRegion(cur) && false) { | |
1886 cl->incomplete(); | |
1887 return; | |
1888 } | |
1889 cur = next; | |
1890 } | |
1891 } | |
1892 | |
1893 CompactibleSpace* G1CollectedHeap::first_compactible_space() { | |
1894 return _hrs->length() > 0 ? _hrs->at(0) : NULL; | |
1895 } | |
1896 | |
1897 | |
1898 Space* G1CollectedHeap::space_containing(const void* addr) const { | |
1899 Space* res = heap_region_containing(addr); | |
1900 if (res == NULL) | |
1901 res = perm_gen()->space_containing(addr); | |
1902 return res; | |
1903 } | |
1904 | |
1905 HeapWord* G1CollectedHeap::block_start(const void* addr) const { | |
1906 Space* sp = space_containing(addr); | |
1907 if (sp != NULL) { | |
1908 return sp->block_start(addr); | |
1909 } | |
1910 return NULL; | |
1911 } | |
1912 | |
1913 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { | |
1914 Space* sp = space_containing(addr); | |
1915 assert(sp != NULL, "block_size of address outside of heap"); | |
1916 return sp->block_size(addr); | |
1917 } | |
1918 | |
1919 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { | |
1920 Space* sp = space_containing(addr); | |
1921 return sp->block_is_obj(addr); | |
1922 } | |
1923 | |
1924 bool G1CollectedHeap::supports_tlab_allocation() const { | |
1925 return true; | |
1926 } | |
1927 | |
1928 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { | |
1929 return HeapRegion::GrainBytes; | |
1930 } | |
1931 | |
1932 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { | |
1933 // Return the remaining space in the cur alloc region, but not less than | |
1934 // the min TLAB size. | |
1935 // Also, no more than half the region size, since we can't allow tlabs to | |
1936 // grow big enough to accomodate humongous objects. | |
1937 | |
1938 // We need to story it locally, since it might change between when we | |
1939 // test for NULL and when we use it later. | |
1940 ContiguousSpace* cur_alloc_space = _cur_alloc_region; | |
1941 if (cur_alloc_space == NULL) { | |
1942 return HeapRegion::GrainBytes/2; | |
1943 } else { | |
1944 return MAX2(MIN2(cur_alloc_space->free(), | |
1945 (size_t)(HeapRegion::GrainBytes/2)), | |
1946 (size_t)MinTLABSize); | |
1947 } | |
1948 } | |
1949 | |
1950 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { | |
1951 bool dummy; | |
1952 return G1CollectedHeap::mem_allocate(size, false, true, &dummy); | |
1953 } | |
1954 | |
1955 bool G1CollectedHeap::allocs_are_zero_filled() { | |
1956 return false; | |
1957 } | |
1958 | |
1959 size_t G1CollectedHeap::large_typearray_limit() { | |
1960 // FIXME | |
1961 return HeapRegion::GrainBytes/HeapWordSize; | |
1962 } | |
1963 | |
1964 size_t G1CollectedHeap::max_capacity() const { | |
1965 return _g1_committed.byte_size(); | |
1966 } | |
1967 | |
1968 jlong G1CollectedHeap::millis_since_last_gc() { | |
1969 // assert(false, "NYI"); | |
1970 return 0; | |
1971 } | |
1972 | |
1973 | |
1974 void G1CollectedHeap::prepare_for_verify() { | |
1975 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
1976 ensure_parsability(false); | |
1977 } | |
1978 g1_rem_set()->prepare_for_verify(); | |
1979 } | |
1980 | |
1981 class VerifyLivenessOopClosure: public OopClosure { | |
1982 G1CollectedHeap* g1h; | |
1983 public: | |
1984 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { | |
1985 g1h = _g1h; | |
1986 } | |
1987 void do_oop(narrowOop *p) { | |
1988 guarantee(false, "NYI"); | |
1989 } | |
1990 void do_oop(oop *p) { | |
1991 oop obj = *p; | |
1992 assert(obj == NULL || !g1h->is_obj_dead(obj), | |
1993 "Dead object referenced by a not dead object"); | |
1994 } | |
1995 }; | |
1996 | |
1997 class VerifyObjsInRegionClosure: public ObjectClosure { | |
1998 G1CollectedHeap* _g1h; | |
1999 size_t _live_bytes; | |
2000 HeapRegion *_hr; | |
2001 public: | |
2002 VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) { | |
2003 _g1h = G1CollectedHeap::heap(); | |
2004 } | |
2005 void do_object(oop o) { | |
2006 VerifyLivenessOopClosure isLive(_g1h); | |
2007 assert(o != NULL, "Huh?"); | |
2008 if (!_g1h->is_obj_dead(o)) { | |
2009 o->oop_iterate(&isLive); | |
2010 if (!_hr->obj_allocated_since_prev_marking(o)) | |
2011 _live_bytes += (o->size() * HeapWordSize); | |
2012 } | |
2013 } | |
2014 size_t live_bytes() { return _live_bytes; } | |
2015 }; | |
2016 | |
2017 class PrintObjsInRegionClosure : public ObjectClosure { | |
2018 HeapRegion *_hr; | |
2019 G1CollectedHeap *_g1; | |
2020 public: | |
2021 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { | |
2022 _g1 = G1CollectedHeap::heap(); | |
2023 }; | |
2024 | |
2025 void do_object(oop o) { | |
2026 if (o != NULL) { | |
2027 HeapWord *start = (HeapWord *) o; | |
2028 size_t word_sz = o->size(); | |
2029 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT | |
2030 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", | |
2031 (void*) o, word_sz, | |
2032 _g1->isMarkedPrev(o), | |
2033 _g1->isMarkedNext(o), | |
2034 _hr->obj_allocated_since_prev_marking(o)); | |
2035 HeapWord *end = start + word_sz; | |
2036 HeapWord *cur; | |
2037 int *val; | |
2038 for (cur = start; cur < end; cur++) { | |
2039 val = (int *) cur; | |
2040 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); | |
2041 } | |
2042 } | |
2043 } | |
2044 }; | |
2045 | |
2046 class VerifyRegionClosure: public HeapRegionClosure { | |
2047 public: | |
2048 bool _allow_dirty; | |
390 | 2049 bool _par; |
2050 VerifyRegionClosure(bool allow_dirty, bool par = false) | |
2051 : _allow_dirty(allow_dirty), _par(par) {} | |
342 | 2052 bool doHeapRegion(HeapRegion* r) { |
390 | 2053 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
2054 "Should be unclaimed at verify points."); | |
342 | 2055 if (r->isHumongous()) { |
2056 if (r->startsHumongous()) { | |
2057 // Verify the single H object. | |
2058 oop(r->bottom())->verify(); | |
2059 size_t word_sz = oop(r->bottom())->size(); | |
2060 guarantee(r->top() == r->bottom() + word_sz, | |
2061 "Only one object in a humongous region"); | |
2062 } | |
2063 } else { | |
2064 VerifyObjsInRegionClosure not_dead_yet_cl(r); | |
2065 r->verify(_allow_dirty); | |
2066 r->object_iterate(¬_dead_yet_cl); | |
2067 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(), | |
2068 "More live objects than counted in last complete marking."); | |
2069 } | |
2070 return false; | |
2071 } | |
2072 }; | |
2073 | |
2074 class VerifyRootsClosure: public OopsInGenClosure { | |
2075 private: | |
2076 G1CollectedHeap* _g1h; | |
2077 bool _failures; | |
2078 | |
2079 public: | |
2080 VerifyRootsClosure() : | |
2081 _g1h(G1CollectedHeap::heap()), _failures(false) { } | |
2082 | |
2083 bool failures() { return _failures; } | |
2084 | |
2085 void do_oop(narrowOop* p) { | |
2086 guarantee(false, "NYI"); | |
2087 } | |
2088 | |
2089 void do_oop(oop* p) { | |
2090 oop obj = *p; | |
2091 if (obj != NULL) { | |
2092 if (_g1h->is_obj_dead(obj)) { | |
2093 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " | |
2094 "points to dead obj "PTR_FORMAT, p, (void*) obj); | |
2095 obj->print_on(gclog_or_tty); | |
2096 _failures = true; | |
2097 } | |
2098 } | |
2099 } | |
2100 }; | |
2101 | |
390 | 2102 // This is the task used for parallel heap verification. |
2103 | |
2104 class G1ParVerifyTask: public AbstractGangTask { | |
2105 private: | |
2106 G1CollectedHeap* _g1h; | |
2107 bool _allow_dirty; | |
2108 | |
2109 public: | |
2110 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) : | |
2111 AbstractGangTask("Parallel verify task"), | |
2112 _g1h(g1h), _allow_dirty(allow_dirty) { } | |
2113 | |
2114 void work(int worker_i) { | |
2115 VerifyRegionClosure blk(_allow_dirty, true); | |
2116 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, | |
2117 HeapRegion::ParVerifyClaimValue); | |
2118 } | |
2119 }; | |
2120 | |
342 | 2121 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
2122 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { | |
2123 if (!silent) { gclog_or_tty->print("roots "); } | |
2124 VerifyRootsClosure rootsCl; | |
2125 process_strong_roots(false, | |
2126 SharedHeap::SO_AllClasses, | |
2127 &rootsCl, | |
2128 &rootsCl); | |
2129 rem_set()->invalidate(perm_gen()->used_region(), false); | |
2130 if (!silent) { gclog_or_tty->print("heapRegions "); } | |
390 | 2131 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
2132 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2133 "sanity check"); | |
2134 | |
2135 G1ParVerifyTask task(this, allow_dirty); | |
2136 int n_workers = workers()->total_workers(); | |
2137 set_par_threads(n_workers); | |
2138 workers()->run_task(&task); | |
2139 set_par_threads(0); | |
2140 | |
2141 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), | |
2142 "sanity check"); | |
2143 | |
2144 reset_heap_region_claim_values(); | |
2145 | |
2146 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), | |
2147 "sanity check"); | |
2148 } else { | |
2149 VerifyRegionClosure blk(allow_dirty); | |
2150 _hrs->iterate(&blk); | |
2151 } | |
342 | 2152 if (!silent) gclog_or_tty->print("remset "); |
2153 rem_set()->verify(); | |
2154 guarantee(!rootsCl.failures(), "should not have had failures"); | |
2155 } else { | |
2156 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); | |
2157 } | |
2158 } | |
2159 | |
2160 class PrintRegionClosure: public HeapRegionClosure { | |
2161 outputStream* _st; | |
2162 public: | |
2163 PrintRegionClosure(outputStream* st) : _st(st) {} | |
2164 bool doHeapRegion(HeapRegion* r) { | |
2165 r->print_on(_st); | |
2166 return false; | |
2167 } | |
2168 }; | |
2169 | |
2170 void G1CollectedHeap::print() const { print_on(gclog_or_tty); } | |
2171 | |
2172 void G1CollectedHeap::print_on(outputStream* st) const { | |
2173 PrintRegionClosure blk(st); | |
2174 _hrs->iterate(&blk); | |
2175 } | |
2176 | |
2177 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { | |
2178 if (ParallelGCThreads > 0) { | |
2179 workers()->print_worker_threads(); | |
2180 } | |
2181 st->print("\"G1 concurrent mark GC Thread\" "); | |
2182 _cmThread->print(); | |
2183 st->cr(); | |
2184 st->print("\"G1 concurrent refinement GC Thread\" "); | |
2185 _cg1r->cg1rThread()->print_on(st); | |
2186 st->cr(); | |
2187 st->print("\"G1 zero-fill GC Thread\" "); | |
2188 _czft->print_on(st); | |
2189 st->cr(); | |
2190 } | |
2191 | |
2192 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
2193 if (ParallelGCThreads > 0) { | |
2194 workers()->threads_do(tc); | |
2195 } | |
2196 tc->do_thread(_cmThread); | |
2197 tc->do_thread(_cg1r->cg1rThread()); | |
2198 tc->do_thread(_czft); | |
2199 } | |
2200 | |
2201 void G1CollectedHeap::print_tracing_info() const { | |
2202 concurrent_g1_refine()->print_final_card_counts(); | |
2203 | |
2204 // We'll overload this to mean "trace GC pause statistics." | |
2205 if (TraceGen0Time || TraceGen1Time) { | |
2206 // The "G1CollectorPolicy" is keeping track of these stats, so delegate | |
2207 // to that. | |
2208 g1_policy()->print_tracing_info(); | |
2209 } | |
2210 if (SummarizeG1RSStats) { | |
2211 g1_rem_set()->print_summary_info(); | |
2212 } | |
2213 if (SummarizeG1ConcMark) { | |
2214 concurrent_mark()->print_summary_info(); | |
2215 } | |
2216 if (SummarizeG1ZFStats) { | |
2217 ConcurrentZFThread::print_summary_info(); | |
2218 } | |
2219 if (G1SummarizePopularity) { | |
2220 print_popularity_summary_info(); | |
2221 } | |
2222 g1_policy()->print_yg_surv_rate_info(); | |
2223 | |
2224 GCOverheadReporter::printGCOverhead(); | |
2225 | |
2226 SpecializationStats::print(); | |
2227 } | |
2228 | |
2229 | |
2230 int G1CollectedHeap::addr_to_arena_id(void* addr) const { | |
2231 HeapRegion* hr = heap_region_containing(addr); | |
2232 if (hr == NULL) { | |
2233 return 0; | |
2234 } else { | |
2235 return 1; | |
2236 } | |
2237 } | |
2238 | |
2239 G1CollectedHeap* G1CollectedHeap::heap() { | |
2240 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, | |
2241 "not a garbage-first heap"); | |
2242 return _g1h; | |
2243 } | |
2244 | |
2245 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { | |
2246 if (PrintHeapAtGC){ | |
2247 gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections()); | |
2248 Universe::print(); | |
2249 } | |
2250 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); | |
2251 // Call allocation profiler | |
2252 AllocationProfiler::iterate_since_last_gc(); | |
2253 // Fill TLAB's and such | |
2254 ensure_parsability(true); | |
2255 } | |
2256 | |
2257 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { | |
2258 // FIXME: what is this about? | |
2259 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" | |
2260 // is set. | |
2261 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), | |
2262 "derived pointer present")); | |
2263 | |
2264 if (PrintHeapAtGC){ | |
2265 gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections()); | |
2266 Universe::print(); | |
2267 gclog_or_tty->print("} "); | |
2268 } | |
2269 } | |
2270 | |
2271 void G1CollectedHeap::do_collection_pause() { | |
2272 // Read the GC count while holding the Heap_lock | |
2273 // we need to do this _before_ wait_for_cleanup_complete(), to | |
2274 // ensure that we do not give up the heap lock and potentially | |
2275 // pick up the wrong count | |
2276 int gc_count_before = SharedHeap::heap()->total_collections(); | |
2277 | |
2278 // Don't want to do a GC pause while cleanup is being completed! | |
2279 wait_for_cleanup_complete(); | |
2280 | |
2281 g1_policy()->record_stop_world_start(); | |
2282 { | |
2283 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
2284 VM_G1IncCollectionPause op(gc_count_before); | |
2285 VMThread::execute(&op); | |
2286 } | |
2287 } | |
2288 | |
2289 void | |
2290 G1CollectedHeap::doConcurrentMark() { | |
2291 if (G1ConcMark) { | |
2292 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2293 if (!_cmThread->in_progress()) { | |
2294 _cmThread->set_started(); | |
2295 CGC_lock->notify(); | |
2296 } | |
2297 } | |
2298 } | |
2299 | |
2300 class VerifyMarkedObjsClosure: public ObjectClosure { | |
2301 G1CollectedHeap* _g1h; | |
2302 public: | |
2303 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | |
2304 void do_object(oop obj) { | |
2305 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, | |
2306 "markandsweep mark should agree with concurrent deadness"); | |
2307 } | |
2308 }; | |
2309 | |
2310 void | |
2311 G1CollectedHeap::checkConcurrentMark() { | |
2312 VerifyMarkedObjsClosure verifycl(this); | |
2313 doConcurrentMark(); | |
2314 // MutexLockerEx x(getMarkBitMapLock(), | |
2315 // Mutex::_no_safepoint_check_flag); | |
2316 object_iterate(&verifycl); | |
2317 } | |
2318 | |
2319 void G1CollectedHeap::do_sync_mark() { | |
2320 _cm->checkpointRootsInitial(); | |
2321 _cm->markFromRoots(); | |
2322 _cm->checkpointRootsFinal(false); | |
2323 } | |
2324 | |
2325 // <NEW PREDICTION> | |
2326 | |
2327 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, | |
2328 bool young) { | |
2329 return _g1_policy->predict_region_elapsed_time_ms(hr, young); | |
2330 } | |
2331 | |
2332 void G1CollectedHeap::check_if_region_is_too_expensive(double | |
2333 predicted_time_ms) { | |
2334 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); | |
2335 } | |
2336 | |
2337 size_t G1CollectedHeap::pending_card_num() { | |
2338 size_t extra_cards = 0; | |
2339 JavaThread *curr = Threads::first(); | |
2340 while (curr != NULL) { | |
2341 DirtyCardQueue& dcq = curr->dirty_card_queue(); | |
2342 extra_cards += dcq.size(); | |
2343 curr = curr->next(); | |
2344 } | |
2345 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2346 size_t buffer_size = dcqs.buffer_size(); | |
2347 size_t buffer_num = dcqs.completed_buffers_num(); | |
2348 return buffer_size * buffer_num + extra_cards; | |
2349 } | |
2350 | |
2351 size_t G1CollectedHeap::max_pending_card_num() { | |
2352 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); | |
2353 size_t buffer_size = dcqs.buffer_size(); | |
2354 size_t buffer_num = dcqs.completed_buffers_num(); | |
2355 int thread_num = Threads::number_of_threads(); | |
2356 return (buffer_num + thread_num) * buffer_size; | |
2357 } | |
2358 | |
2359 size_t G1CollectedHeap::cards_scanned() { | |
2360 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); | |
2361 return g1_rset->cardsScanned(); | |
2362 } | |
2363 | |
2364 void | |
2365 G1CollectedHeap::setup_surviving_young_words() { | |
2366 guarantee( _surviving_young_words == NULL, "pre-condition" ); | |
2367 size_t array_length = g1_policy()->young_cset_length(); | |
2368 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); | |
2369 if (_surviving_young_words == NULL) { | |
2370 vm_exit_out_of_memory(sizeof(size_t) * array_length, | |
2371 "Not enough space for young surv words summary."); | |
2372 } | |
2373 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); | |
2374 for (size_t i = 0; i < array_length; ++i) { | |
2375 guarantee( _surviving_young_words[i] == 0, "invariant" ); | |
2376 } | |
2377 } | |
2378 | |
2379 void | |
2380 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { | |
2381 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2382 size_t array_length = g1_policy()->young_cset_length(); | |
2383 for (size_t i = 0; i < array_length; ++i) | |
2384 _surviving_young_words[i] += surv_young_words[i]; | |
2385 } | |
2386 | |
2387 void | |
2388 G1CollectedHeap::cleanup_surviving_young_words() { | |
2389 guarantee( _surviving_young_words != NULL, "pre-condition" ); | |
2390 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); | |
2391 _surviving_young_words = NULL; | |
2392 } | |
2393 | |
2394 // </NEW PREDICTION> | |
2395 | |
2396 void | |
2397 G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) { | |
2398 char verbose_str[128]; | |
2399 sprintf(verbose_str, "GC pause "); | |
2400 if (popular_region != NULL) | |
2401 strcat(verbose_str, "(popular)"); | |
2402 else if (g1_policy()->in_young_gc_mode()) { | |
2403 if (g1_policy()->full_young_gcs()) | |
2404 strcat(verbose_str, "(young)"); | |
2405 else | |
2406 strcat(verbose_str, "(partial)"); | |
2407 } | |
2408 bool reset_should_initiate_conc_mark = false; | |
2409 if (popular_region != NULL && g1_policy()->should_initiate_conc_mark()) { | |
2410 // we currently do not allow an initial mark phase to be piggy-backed | |
2411 // on a popular pause | |
2412 reset_should_initiate_conc_mark = true; | |
2413 g1_policy()->unset_should_initiate_conc_mark(); | |
2414 } | |
2415 if (g1_policy()->should_initiate_conc_mark()) | |
2416 strcat(verbose_str, " (initial-mark)"); | |
2417 | |
2418 GCCauseSetter x(this, (popular_region == NULL ? | |
2419 GCCause::_g1_inc_collection_pause : | |
2420 GCCause::_g1_pop_region_collection_pause)); | |
2421 | |
2422 // if PrintGCDetails is on, we'll print long statistics information | |
2423 // in the collector policy code, so let's not print this as the output | |
2424 // is messy if we do. | |
2425 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
2426 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
2427 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); | |
2428 | |
2429 ResourceMark rm; | |
2430 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
2431 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); | |
2432 guarantee(!is_gc_active(), "collection is not reentrant"); | |
2433 assert(regions_accounted_for(), "Region leakage!"); | |
353
9bb2c10ac07b
6723570: G1: assertion failure: p == current_top or oop(p)->is_oop(),"p is not a block start" (revisited!)
iveresov
parents:
352
diff
changeset
|
2434 |
9bb2c10ac07b
6723570: G1: assertion failure: p == current_top or oop(p)->is_oop(),"p is not a block start" (revisited!)
iveresov
parents:
352
diff
changeset
|
2435 increment_gc_time_stamp(); |
342 | 2436 |
2437 if (g1_policy()->in_young_gc_mode()) { | |
2438 assert(check_young_list_well_formed(), | |
2439 "young list should be well formed"); | |
2440 } | |
2441 | |
2442 if (GC_locker::is_active()) { | |
2443 return; // GC is disabled (e.g. JNI GetXXXCritical operation) | |
2444 } | |
2445 | |
2446 bool abandoned = false; | |
2447 { // Call to jvmpi::post_class_unload_events must occur outside of active GC | |
2448 IsGCActiveMark x; | |
2449 | |
2450 gc_prologue(false); | |
2451 increment_total_collections(); | |
2452 | |
2453 #if G1_REM_SET_LOGGING | |
2454 gclog_or_tty->print_cr("\nJust chose CS, heap:"); | |
2455 print(); | |
2456 #endif | |
2457 | |
2458 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { | |
2459 HandleMark hm; // Discard invalid handles created during verification | |
2460 prepare_for_verify(); | |
2461 gclog_or_tty->print(" VerifyBeforeGC:"); | |
2462 Universe::verify(false); | |
2463 } | |
2464 | |
2465 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
2466 | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
393
diff
changeset
|
2467 // We want to turn off ref discovery, if necessary, and turn it back on |
342 | 2468 // on again later if we do. |
2469 bool was_enabled = ref_processor()->discovery_enabled(); | |
2470 if (was_enabled) ref_processor()->disable_discovery(); | |
2471 | |
2472 // Forget the current alloc region (we might even choose it to be part | |
2473 // of the collection set!). | |
2474 abandon_cur_alloc_region(); | |
2475 | |
2476 // The elapsed time induced by the start time below deliberately elides | |
2477 // the possible verification above. | |
2478 double start_time_sec = os::elapsedTime(); | |
2479 GCOverheadReporter::recordSTWStart(start_time_sec); | |
2480 size_t start_used_bytes = used(); | |
2481 if (!G1ConcMark) { | |
2482 do_sync_mark(); | |
2483 } | |
2484 | |
2485 g1_policy()->record_collection_pause_start(start_time_sec, | |
2486 start_used_bytes); | |
2487 | |
2488 #if SCAN_ONLY_VERBOSE | |
2489 _young_list->print(); | |
2490 #endif // SCAN_ONLY_VERBOSE | |
2491 | |
2492 if (g1_policy()->should_initiate_conc_mark()) { | |
2493 concurrent_mark()->checkpointRootsInitialPre(); | |
2494 } | |
2495 save_marks(); | |
2496 | |
2497 // We must do this before any possible evacuation that should propogate | |
2498 // marks, including evacuation of popular objects in a popular pause. | |
2499 if (mark_in_progress()) { | |
2500 double start_time_sec = os::elapsedTime(); | |
2501 | |
2502 _cm->drainAllSATBBuffers(); | |
2503 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; | |
2504 g1_policy()->record_satb_drain_time(finish_mark_ms); | |
2505 | |
2506 } | |
2507 // Record the number of elements currently on the mark stack, so we | |
2508 // only iterate over these. (Since evacuation may add to the mark | |
2509 // stack, doing more exposes race conditions.) If no mark is in | |
2510 // progress, this will be zero. | |
2511 _cm->set_oops_do_bound(); | |
2512 | |
2513 assert(regions_accounted_for(), "Region leakage."); | |
2514 | |
2515 bool abandoned = false; | |
2516 | |
2517 if (mark_in_progress()) | |
2518 concurrent_mark()->newCSet(); | |
2519 | |
2520 // Now choose the CS. | |
2521 if (popular_region == NULL) { | |
2522 g1_policy()->choose_collection_set(); | |
2523 } else { | |
2524 // We may be evacuating a single region (for popularity). | |
2525 g1_policy()->record_popular_pause_preamble_start(); | |
2526 popularity_pause_preamble(popular_region); | |
2527 g1_policy()->record_popular_pause_preamble_end(); | |
2528 abandoned = (g1_policy()->collection_set() == NULL); | |
2529 // Now we allow more regions to be added (we have to collect | |
2530 // all popular regions). | |
2531 if (!abandoned) { | |
2532 g1_policy()->choose_collection_set(popular_region); | |
2533 } | |
2534 } | |
2535 // We may abandon a pause if we find no region that will fit in the MMU | |
2536 // pause. | |
2537 abandoned = (g1_policy()->collection_set() == NULL); | |
2538 | |
2539 // Nothing to do if we were unable to choose a collection set. | |
2540 if (!abandoned) { | |
2541 #if G1_REM_SET_LOGGING | |
2542 gclog_or_tty->print_cr("\nAfter pause, heap:"); | |
2543 print(); | |
2544 #endif | |
2545 | |
2546 setup_surviving_young_words(); | |
2547 | |
2548 // Set up the gc allocation regions. | |
2549 get_gc_alloc_regions(); | |
2550 | |
2551 // Actually do the work... | |
2552 evacuate_collection_set(); | |
2553 free_collection_set(g1_policy()->collection_set()); | |
2554 g1_policy()->clear_collection_set(); | |
2555 | |
2556 if (popular_region != NULL) { | |
2557 // We have to wait until now, because we don't want the region to | |
2558 // be rescheduled for pop-evac during RS update. | |
2559 popular_region->set_popular_pending(false); | |
2560 } | |
2561 | |
2562 release_gc_alloc_regions(); | |
2563 | |
2564 cleanup_surviving_young_words(); | |
2565 | |
2566 if (g1_policy()->in_young_gc_mode()) { | |
2567 _young_list->reset_sampled_info(); | |
2568 assert(check_young_list_empty(true), | |
2569 "young list should be empty"); | |
2570 | |
2571 #if SCAN_ONLY_VERBOSE | |
2572 _young_list->print(); | |
2573 #endif // SCAN_ONLY_VERBOSE | |
2574 | |
2575 _young_list->reset_auxilary_lists(); | |
2576 } | |
2577 } else { | |
2578 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
2579 } | |
2580 | |
2581 if (evacuation_failed()) { | |
2582 _summary_bytes_used = recalculate_used(); | |
2583 } else { | |
2584 // The "used" of the the collection set have already been subtracted | |
2585 // when they were freed. Add in the bytes evacuated. | |
2586 _summary_bytes_used += g1_policy()->bytes_in_to_space(); | |
2587 } | |
2588 | |
2589 if (g1_policy()->in_young_gc_mode() && | |
2590 g1_policy()->should_initiate_conc_mark()) { | |
2591 concurrent_mark()->checkpointRootsInitialPost(); | |
2592 set_marking_started(); | |
2593 doConcurrentMark(); | |
2594 } | |
2595 | |
2596 #if SCAN_ONLY_VERBOSE | |
2597 _young_list->print(); | |
2598 #endif // SCAN_ONLY_VERBOSE | |
2599 | |
2600 double end_time_sec = os::elapsedTime(); | |
2601 g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0); | |
2602 GCOverheadReporter::recordSTWEnd(end_time_sec); | |
2603 g1_policy()->record_collection_pause_end(popular_region != NULL, | |
2604 abandoned); | |
2605 | |
2606 assert(regions_accounted_for(), "Region leakage."); | |
2607 | |
2608 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { | |
2609 HandleMark hm; // Discard invalid handles created during verification | |
2610 gclog_or_tty->print(" VerifyAfterGC:"); | |
2611 Universe::verify(false); | |
2612 } | |
2613 | |
2614 if (was_enabled) ref_processor()->enable_discovery(); | |
2615 | |
2616 { | |
2617 size_t expand_bytes = g1_policy()->expansion_amount(); | |
2618 if (expand_bytes > 0) { | |
2619 size_t bytes_before = capacity(); | |
2620 expand(expand_bytes); | |
2621 } | |
2622 } | |
2623 | |
2624 if (mark_in_progress()) | |
2625 concurrent_mark()->update_g1_committed(); | |
2626 | |
2627 gc_epilogue(false); | |
2628 } | |
2629 | |
2630 assert(verify_region_lists(), "Bad region lists."); | |
2631 | |
2632 if (reset_should_initiate_conc_mark) | |
2633 g1_policy()->set_should_initiate_conc_mark(); | |
2634 | |
2635 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { | |
2636 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); | |
2637 print_tracing_info(); | |
2638 vm_exit(-1); | |
2639 } | |
2640 } | |
2641 | |
2642 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { | |
2643 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); | |
2644 HeapWord* original_top = NULL; | |
2645 if (r != NULL) | |
2646 original_top = r->top(); | |
2647 | |
2648 // We will want to record the used space in r as being there before gc. | |
2649 // One we install it as a GC alloc region it's eligible for allocation. | |
2650 // So record it now and use it later. | |
2651 size_t r_used = 0; | |
2652 if (r != NULL) { | |
2653 r_used = r->used(); | |
2654 | |
2655 if (ParallelGCThreads > 0) { | |
2656 // need to take the lock to guard against two threads calling | |
2657 // get_gc_alloc_region concurrently (very unlikely but...) | |
2658 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
2659 r->save_marks(); | |
2660 } | |
2661 } | |
2662 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; | |
2663 _gc_alloc_regions[purpose] = r; | |
2664 if (old_alloc_region != NULL) { | |
2665 // Replace aliases too. | |
2666 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2667 if (_gc_alloc_regions[ap] == old_alloc_region) { | |
2668 _gc_alloc_regions[ap] = r; | |
2669 } | |
2670 } | |
2671 } | |
2672 if (r != NULL) { | |
2673 push_gc_alloc_region(r); | |
2674 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { | |
2675 // We are using a region as a GC alloc region after it has been used | |
2676 // as a mutator allocation region during the current marking cycle. | |
2677 // The mutator-allocated objects are currently implicitly marked, but | |
2678 // when we move hr->next_top_at_mark_start() forward at the the end | |
2679 // of the GC pause, they won't be. We therefore mark all objects in | |
2680 // the "gap". We do this object-by-object, since marking densely | |
2681 // does not currently work right with marking bitmap iteration. This | |
2682 // means we rely on TLAB filling at the start of pauses, and no | |
2683 // "resuscitation" of filled TLAB's. If we want to do this, we need | |
2684 // to fix the marking bitmap iteration. | |
2685 HeapWord* curhw = r->next_top_at_mark_start(); | |
2686 HeapWord* t = original_top; | |
2687 | |
2688 while (curhw < t) { | |
2689 oop cur = (oop)curhw; | |
2690 // We'll assume parallel for generality. This is rare code. | |
2691 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? | |
2692 curhw = curhw + cur->size(); | |
2693 } | |
2694 assert(curhw == t, "Should have parsed correctly."); | |
2695 } | |
2696 if (G1PolicyVerbose > 1) { | |
2697 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " | |
2698 "for survivors:", r->bottom(), original_top, r->end()); | |
2699 r->print(); | |
2700 } | |
2701 g1_policy()->record_before_bytes(r_used); | |
2702 } | |
2703 } | |
2704 | |
2705 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { | |
2706 assert(Thread::current()->is_VM_thread() || | |
2707 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); | |
2708 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), | |
2709 "Precondition."); | |
2710 hr->set_is_gc_alloc_region(true); | |
2711 hr->set_next_gc_alloc_region(_gc_alloc_region_list); | |
2712 _gc_alloc_region_list = hr; | |
2713 } | |
2714 | |
2715 #ifdef G1_DEBUG | |
2716 class FindGCAllocRegion: public HeapRegionClosure { | |
2717 public: | |
2718 bool doHeapRegion(HeapRegion* r) { | |
2719 if (r->is_gc_alloc_region()) { | |
2720 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", | |
2721 r->hrs_index(), r->bottom()); | |
2722 } | |
2723 return false; | |
2724 } | |
2725 }; | |
2726 #endif // G1_DEBUG | |
2727 | |
2728 void G1CollectedHeap::forget_alloc_region_list() { | |
2729 assert(Thread::current()->is_VM_thread(), "Precondition"); | |
2730 while (_gc_alloc_region_list != NULL) { | |
2731 HeapRegion* r = _gc_alloc_region_list; | |
2732 assert(r->is_gc_alloc_region(), "Invariant."); | |
2733 _gc_alloc_region_list = r->next_gc_alloc_region(); | |
2734 r->set_next_gc_alloc_region(NULL); | |
2735 r->set_is_gc_alloc_region(false); | |
2736 if (r->is_empty()) { | |
2737 ++_free_regions; | |
2738 } | |
2739 } | |
2740 #ifdef G1_DEBUG | |
2741 FindGCAllocRegion fa; | |
2742 heap_region_iterate(&fa); | |
2743 #endif // G1_DEBUG | |
2744 } | |
2745 | |
2746 | |
2747 bool G1CollectedHeap::check_gc_alloc_regions() { | |
2748 // TODO: allocation regions check | |
2749 return true; | |
2750 } | |
2751 | |
2752 void G1CollectedHeap::get_gc_alloc_regions() { | |
2753 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2754 // Create new GC alloc regions. | |
2755 HeapRegion* alloc_region = _gc_alloc_regions[ap]; | |
2756 // Clear this alloc region, so that in case it turns out to be | |
2757 // unacceptable, we end up with no allocation region, rather than a bad | |
2758 // one. | |
2759 _gc_alloc_regions[ap] = NULL; | |
2760 if (alloc_region == NULL || alloc_region->in_collection_set()) { | |
2761 // Can't re-use old one. Allocate a new one. | |
2762 alloc_region = newAllocRegionWithExpansion(ap, 0); | |
2763 } | |
2764 if (alloc_region != NULL) { | |
2765 set_gc_alloc_region(ap, alloc_region); | |
2766 } | |
2767 } | |
2768 // Set alternative regions for allocation purposes that have reached | |
2769 // thier limit. | |
2770 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2771 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); | |
2772 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { | |
2773 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; | |
2774 } | |
2775 } | |
2776 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
2777 } | |
2778 | |
2779 void G1CollectedHeap::release_gc_alloc_regions() { | |
2780 // We keep a separate list of all regions that have been alloc regions in | |
2781 // the current collection pause. Forget that now. | |
2782 forget_alloc_region_list(); | |
2783 | |
2784 // The current alloc regions contain objs that have survived | |
2785 // collection. Make them no longer GC alloc regions. | |
2786 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
2787 HeapRegion* r = _gc_alloc_regions[ap]; | |
2788 if (r != NULL && r->is_empty()) { | |
2789 { | |
2790 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
2791 r->set_zero_fill_complete(); | |
2792 put_free_region_on_list_locked(r); | |
2793 } | |
2794 } | |
2795 // set_gc_alloc_region will also NULLify all aliases to the region | |
2796 set_gc_alloc_region(ap, NULL); | |
2797 _gc_alloc_region_counts[ap] = 0; | |
2798 } | |
2799 } | |
2800 | |
2801 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { | |
2802 _drain_in_progress = false; | |
2803 set_evac_failure_closure(cl); | |
2804 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
2805 } | |
2806 | |
2807 void G1CollectedHeap::finalize_for_evac_failure() { | |
2808 assert(_evac_failure_scan_stack != NULL && | |
2809 _evac_failure_scan_stack->length() == 0, | |
2810 "Postcondition"); | |
2811 assert(!_drain_in_progress, "Postcondition"); | |
2812 // Don't have to delete, since the scan stack is a resource object. | |
2813 _evac_failure_scan_stack = NULL; | |
2814 } | |
2815 | |
2816 | |
2817 | |
2818 // *** Sequential G1 Evacuation | |
2819 | |
2820 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { | |
2821 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
2822 // let the caller handle alloc failure | |
2823 if (alloc_region == NULL) return NULL; | |
2824 assert(isHumongous(word_size) || !alloc_region->isHumongous(), | |
2825 "Either the object is humongous or the region isn't"); | |
2826 HeapWord* block = alloc_region->allocate(word_size); | |
2827 if (block == NULL) { | |
2828 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); | |
2829 } | |
2830 return block; | |
2831 } | |
2832 | |
2833 class G1IsAliveClosure: public BoolObjectClosure { | |
2834 G1CollectedHeap* _g1; | |
2835 public: | |
2836 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
2837 void do_object(oop p) { assert(false, "Do not call."); } | |
2838 bool do_object_b(oop p) { | |
2839 // It is reachable if it is outside the collection set, or is inside | |
2840 // and forwarded. | |
2841 | |
2842 #ifdef G1_DEBUG | |
2843 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", | |
2844 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), | |
2845 !_g1->obj_in_cs(p) || p->is_forwarded()); | |
2846 #endif // G1_DEBUG | |
2847 | |
2848 return !_g1->obj_in_cs(p) || p->is_forwarded(); | |
2849 } | |
2850 }; | |
2851 | |
2852 class G1KeepAliveClosure: public OopClosure { | |
2853 G1CollectedHeap* _g1; | |
2854 public: | |
2855 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | |
2856 void do_oop(narrowOop* p) { | |
2857 guarantee(false, "NYI"); | |
2858 } | |
2859 void do_oop(oop* p) { | |
2860 oop obj = *p; | |
2861 #ifdef G1_DEBUG | |
2862 if (PrintGC && Verbose) { | |
2863 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, | |
2864 p, (void*) obj, (void*) *p); | |
2865 } | |
2866 #endif // G1_DEBUG | |
2867 | |
2868 if (_g1->obj_in_cs(obj)) { | |
2869 assert( obj->is_forwarded(), "invariant" ); | |
2870 *p = obj->forwardee(); | |
2871 | |
2872 #ifdef G1_DEBUG | |
2873 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, | |
2874 (void*) obj, (void*) *p); | |
2875 #endif // G1_DEBUG | |
2876 } | |
2877 } | |
2878 }; | |
2879 | |
2880 class RecreateRSetEntriesClosure: public OopClosure { | |
2881 private: | |
2882 G1CollectedHeap* _g1; | |
2883 G1RemSet* _g1_rem_set; | |
2884 HeapRegion* _from; | |
2885 public: | |
2886 RecreateRSetEntriesClosure(G1CollectedHeap* g1, HeapRegion* from) : | |
2887 _g1(g1), _g1_rem_set(g1->g1_rem_set()), _from(from) | |
2888 {} | |
2889 | |
2890 void do_oop(narrowOop* p) { | |
2891 guarantee(false, "NYI"); | |
2892 } | |
2893 void do_oop(oop* p) { | |
2894 assert(_from->is_in_reserved(p), "paranoia"); | |
2895 if (*p != NULL) { | |
2896 _g1_rem_set->write_ref(_from, p); | |
2897 } | |
2898 } | |
2899 }; | |
2900 | |
2901 class RemoveSelfPointerClosure: public ObjectClosure { | |
2902 private: | |
2903 G1CollectedHeap* _g1; | |
2904 ConcurrentMark* _cm; | |
2905 HeapRegion* _hr; | |
2906 size_t _prev_marked_bytes; | |
2907 size_t _next_marked_bytes; | |
2908 public: | |
2909 RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr) : | |
2910 _g1(g1), _cm(_g1->concurrent_mark()), _hr(hr), | |
2911 _prev_marked_bytes(0), _next_marked_bytes(0) | |
2912 {} | |
2913 | |
2914 size_t prev_marked_bytes() { return _prev_marked_bytes; } | |
2915 size_t next_marked_bytes() { return _next_marked_bytes; } | |
2916 | |
352
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2917 // The original idea here was to coalesce evacuated and dead objects. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2918 // However that caused complications with the block offset table (BOT). |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2919 // In particular if there were two TLABs, one of them partially refined. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2920 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2921 // The BOT entries of the unrefined part of TLAB_2 point to the start |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2922 // of TLAB_2. If the last object of the TLAB_1 and the first object |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2923 // of TLAB_2 are coalesced, then the cards of the unrefined part |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2924 // would point into middle of the filler object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2925 // |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2926 // The current approach is to not coalesce and leave the BOT contents intact. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2927 void do_object(oop obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2928 if (obj->is_forwarded() && obj->forwardee() == obj) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2929 // The object failed to move. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2930 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2931 _cm->markPrev(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2932 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2933 _prev_marked_bytes += (obj->size() * HeapWordSize); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2934 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2935 _cm->markAndGrayObjectIfNecessary(obj); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2936 } |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2937 obj->set_mark(markOopDesc::prototype()); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2938 // While we were processing RSet buffers during the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2939 // collection, we actually didn't scan any cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2940 // collection set, since we didn't want to update remebered |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2941 // sets with entries that point into the collection set, given |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2942 // that live objects fromthe collection set are about to move |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2943 // and such entries will be stale very soon. This change also |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2944 // dealt with a reliability issue which involved scanning a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2945 // card in the collection set and coming across an array that |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2946 // was being chunked and looking malformed. The problem is |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2947 // that, if evacuation fails, we might have remembered set |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2948 // entries missing given that we skipped cards on the |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2949 // collection set. So, we'll recreate such entries now. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2950 RecreateRSetEntriesClosure cl(_g1, _hr); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2951 obj->oop_iterate(&cl); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2952 assert(_cm->isPrevMarked(obj), "Should be marked!"); |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2953 } else { |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2954 // The object has been either evacuated or is dead. Fill it with a |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2955 // dummy object. |
e0c09f7ec5c4
6702387: G1: assertion failure: assert(p == current_top || oop(p)->is_oop(),"p is not a block start")
iveresov
parents:
342
diff
changeset
|
2956 MemRegion mr((HeapWord*)obj, obj->size()); |
342 | 2957 SharedHeap::fill_region_with_object(mr); |
2958 _cm->clearRangeBothMaps(mr); | |
2959 } | |
2960 } | |
2961 }; | |
2962 | |
2963 void G1CollectedHeap::remove_self_forwarding_pointers() { | |
2964 HeapRegion* cur = g1_policy()->collection_set(); | |
2965 | |
2966 while (cur != NULL) { | |
2967 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
2968 | |
2969 if (cur->evacuation_failed()) { | |
2970 RemoveSelfPointerClosure rspc(_g1h, cur); | |
2971 assert(cur->in_collection_set(), "bad CS"); | |
2972 cur->object_iterate(&rspc); | |
2973 | |
2974 // A number of manipulations to make the TAMS be the current top, | |
2975 // and the marked bytes be the ones observed in the iteration. | |
2976 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { | |
2977 // The comments below are the postconditions achieved by the | |
2978 // calls. Note especially the last such condition, which says that | |
2979 // the count of marked bytes has been properly restored. | |
2980 cur->note_start_of_marking(false); | |
2981 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
2982 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); | |
2983 // _next_marked_bytes == prev_marked_bytes. | |
2984 cur->note_end_of_marking(); | |
2985 // _prev_top_at_mark_start == top(), | |
2986 // _prev_marked_bytes == prev_marked_bytes | |
2987 } | |
2988 // If there is no mark in progress, we modified the _next variables | |
2989 // above needlessly, but harmlessly. | |
2990 if (_g1h->mark_in_progress()) { | |
2991 cur->note_start_of_marking(false); | |
2992 // _next_top_at_mark_start == top, _next_marked_bytes == 0 | |
2993 // _next_marked_bytes == next_marked_bytes. | |
2994 } | |
2995 | |
2996 // Now make sure the region has the right index in the sorted array. | |
2997 g1_policy()->note_change_in_marked_bytes(cur); | |
2998 } | |
2999 cur = cur->next_in_collection_set(); | |
3000 } | |
3001 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); | |
3002 | |
3003 // Now restore saved marks, if any. | |
3004 if (_objs_with_preserved_marks != NULL) { | |
3005 assert(_preserved_marks_of_objs != NULL, "Both or none."); | |
3006 assert(_objs_with_preserved_marks->length() == | |
3007 _preserved_marks_of_objs->length(), "Both or none."); | |
3008 guarantee(_objs_with_preserved_marks->length() == | |
3009 _preserved_marks_of_objs->length(), "Both or none."); | |
3010 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { | |
3011 oop obj = _objs_with_preserved_marks->at(i); | |
3012 markOop m = _preserved_marks_of_objs->at(i); | |
3013 obj->set_mark(m); | |
3014 } | |
3015 // Delete the preserved marks growable arrays (allocated on the C heap). | |
3016 delete _objs_with_preserved_marks; | |
3017 delete _preserved_marks_of_objs; | |
3018 _objs_with_preserved_marks = NULL; | |
3019 _preserved_marks_of_objs = NULL; | |
3020 } | |
3021 } | |
3022 | |
3023 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { | |
3024 _evac_failure_scan_stack->push(obj); | |
3025 } | |
3026 | |
3027 void G1CollectedHeap::drain_evac_failure_scan_stack() { | |
3028 assert(_evac_failure_scan_stack != NULL, "precondition"); | |
3029 | |
3030 while (_evac_failure_scan_stack->length() > 0) { | |
3031 oop obj = _evac_failure_scan_stack->pop(); | |
3032 _evac_failure_closure->set_region(heap_region_containing(obj)); | |
3033 obj->oop_iterate_backwards(_evac_failure_closure); | |
3034 } | |
3035 } | |
3036 | |
3037 void G1CollectedHeap::handle_evacuation_failure(oop old) { | |
3038 markOop m = old->mark(); | |
3039 // forward to self | |
3040 assert(!old->is_forwarded(), "precondition"); | |
3041 | |
3042 old->forward_to(old); | |
3043 handle_evacuation_failure_common(old, m); | |
3044 } | |
3045 | |
3046 oop | |
3047 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, | |
3048 oop old) { | |
3049 markOop m = old->mark(); | |
3050 oop forward_ptr = old->forward_to_atomic(old); | |
3051 if (forward_ptr == NULL) { | |
3052 // Forward-to-self succeeded. | |
3053 if (_evac_failure_closure != cl) { | |
3054 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); | |
3055 assert(!_drain_in_progress, | |
3056 "Should only be true while someone holds the lock."); | |
3057 // Set the global evac-failure closure to the current thread's. | |
3058 assert(_evac_failure_closure == NULL, "Or locking has failed."); | |
3059 set_evac_failure_closure(cl); | |
3060 // Now do the common part. | |
3061 handle_evacuation_failure_common(old, m); | |
3062 // Reset to NULL. | |
3063 set_evac_failure_closure(NULL); | |
3064 } else { | |
3065 // The lock is already held, and this is recursive. | |
3066 assert(_drain_in_progress, "This should only be the recursive case."); | |
3067 handle_evacuation_failure_common(old, m); | |
3068 } | |
3069 return old; | |
3070 } else { | |
3071 // Someone else had a place to copy it. | |
3072 return forward_ptr; | |
3073 } | |
3074 } | |
3075 | |
3076 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { | |
3077 set_evacuation_failed(true); | |
3078 | |
3079 preserve_mark_if_necessary(old, m); | |
3080 | |
3081 HeapRegion* r = heap_region_containing(old); | |
3082 if (!r->evacuation_failed()) { | |
3083 r->set_evacuation_failed(true); | |
3084 if (G1TraceRegions) { | |
3085 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " | |
3086 "["PTR_FORMAT","PTR_FORMAT")\n", | |
3087 r, r->bottom(), r->end()); | |
3088 } | |
3089 } | |
3090 | |
3091 push_on_evac_failure_scan_stack(old); | |
3092 | |
3093 if (!_drain_in_progress) { | |
3094 // prevent recursion in copy_to_survivor_space() | |
3095 _drain_in_progress = true; | |
3096 drain_evac_failure_scan_stack(); | |
3097 _drain_in_progress = false; | |
3098 } | |
3099 } | |
3100 | |
3101 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { | |
3102 if (m != markOopDesc::prototype()) { | |
3103 if (_objs_with_preserved_marks == NULL) { | |
3104 assert(_preserved_marks_of_objs == NULL, "Both or none."); | |
3105 _objs_with_preserved_marks = | |
3106 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); | |
3107 _preserved_marks_of_objs = | |
3108 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); | |
3109 } | |
3110 _objs_with_preserved_marks->push(obj); | |
3111 _preserved_marks_of_objs->push(m); | |
3112 } | |
3113 } | |
3114 | |
3115 // *** Parallel G1 Evacuation | |
3116 | |
3117 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, | |
3118 size_t word_size) { | |
3119 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; | |
3120 // let the caller handle alloc failure | |
3121 if (alloc_region == NULL) return NULL; | |
3122 | |
3123 HeapWord* block = alloc_region->par_allocate(word_size); | |
3124 if (block == NULL) { | |
3125 MutexLockerEx x(par_alloc_during_gc_lock(), | |
3126 Mutex::_no_safepoint_check_flag); | |
3127 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); | |
3128 } | |
3129 return block; | |
3130 } | |
3131 | |
3132 HeapWord* | |
3133 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, | |
3134 HeapRegion* alloc_region, | |
3135 bool par, | |
3136 size_t word_size) { | |
3137 HeapWord* block = NULL; | |
3138 // In the parallel case, a previous thread to obtain the lock may have | |
3139 // already assigned a new gc_alloc_region. | |
3140 if (alloc_region != _gc_alloc_regions[purpose]) { | |
3141 assert(par, "But should only happen in parallel case."); | |
3142 alloc_region = _gc_alloc_regions[purpose]; | |
3143 if (alloc_region == NULL) return NULL; | |
3144 block = alloc_region->par_allocate(word_size); | |
3145 if (block != NULL) return block; | |
3146 // Otherwise, continue; this new region is empty, too. | |
3147 } | |
3148 assert(alloc_region != NULL, "We better have an allocation region"); | |
3149 // Another thread might have obtained alloc_region for the given | |
3150 // purpose, and might be attempting to allocate in it, and might | |
3151 // succeed. Therefore, we can't do the "finalization" stuff on the | |
3152 // region below until we're sure the last allocation has happened. | |
3153 // We ensure this by allocating the remaining space with a garbage | |
3154 // object. | |
3155 if (par) par_allocate_remaining_space(alloc_region); | |
3156 // Now we can do the post-GC stuff on the region. | |
3157 alloc_region->note_end_of_copying(); | |
3158 g1_policy()->record_after_bytes(alloc_region->used()); | |
3159 | |
3160 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { | |
3161 // Cannot allocate more regions for the given purpose. | |
3162 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); | |
3163 // Is there an alternative? | |
3164 if (purpose != alt_purpose) { | |
3165 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; | |
3166 // Has not the alternative region been aliased? | |
3167 if (alloc_region != alt_region) { | |
3168 // Try to allocate in the alternative region. | |
3169 if (par) { | |
3170 block = alt_region->par_allocate(word_size); | |
3171 } else { | |
3172 block = alt_region->allocate(word_size); | |
3173 } | |
3174 // Make an alias. | |
3175 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; | |
3176 } | |
3177 if (block != NULL) { | |
3178 return block; | |
3179 } | |
3180 // Both the allocation region and the alternative one are full | |
3181 // and aliased, replace them with a new allocation region. | |
3182 purpose = alt_purpose; | |
3183 } else { | |
3184 set_gc_alloc_region(purpose, NULL); | |
3185 return NULL; | |
3186 } | |
3187 } | |
3188 | |
3189 // Now allocate a new region for allocation. | |
3190 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); | |
3191 | |
3192 // let the caller handle alloc failure | |
3193 if (alloc_region != NULL) { | |
3194 | |
3195 assert(check_gc_alloc_regions(), "alloc regions messed up"); | |
3196 assert(alloc_region->saved_mark_at_top(), | |
3197 "Mark should have been saved already."); | |
3198 // We used to assert that the region was zero-filled here, but no | |
3199 // longer. | |
3200 | |
3201 // This must be done last: once it's installed, other regions may | |
3202 // allocate in it (without holding the lock.) | |
3203 set_gc_alloc_region(purpose, alloc_region); | |
3204 | |
3205 if (par) { | |
3206 block = alloc_region->par_allocate(word_size); | |
3207 } else { | |
3208 block = alloc_region->allocate(word_size); | |
3209 } | |
3210 // Caller handles alloc failure. | |
3211 } else { | |
3212 // This sets other apis using the same old alloc region to NULL, also. | |
3213 set_gc_alloc_region(purpose, NULL); | |
3214 } | |
3215 return block; // May be NULL. | |
3216 } | |
3217 | |
3218 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { | |
3219 HeapWord* block = NULL; | |
3220 size_t free_words; | |
3221 do { | |
3222 free_words = r->free()/HeapWordSize; | |
3223 // If there's too little space, no one can allocate, so we're done. | |
3224 if (free_words < (size_t)oopDesc::header_size()) return; | |
3225 // Otherwise, try to claim it. | |
3226 block = r->par_allocate(free_words); | |
3227 } while (block == NULL); | |
3228 SharedHeap::fill_region_with_object(MemRegion(block, free_words)); | |
3229 } | |
3230 | |
3231 #define use_local_bitmaps 1 | |
3232 #define verify_local_bitmaps 0 | |
3233 | |
3234 #ifndef PRODUCT | |
3235 | |
3236 class GCLabBitMap; | |
3237 class GCLabBitMapClosure: public BitMapClosure { | |
3238 private: | |
3239 ConcurrentMark* _cm; | |
3240 GCLabBitMap* _bitmap; | |
3241 | |
3242 public: | |
3243 GCLabBitMapClosure(ConcurrentMark* cm, | |
3244 GCLabBitMap* bitmap) { | |
3245 _cm = cm; | |
3246 _bitmap = bitmap; | |
3247 } | |
3248 | |
3249 virtual bool do_bit(size_t offset); | |
3250 }; | |
3251 | |
3252 #endif // PRODUCT | |
3253 | |
3254 #define oop_buffer_length 256 | |
3255 | |
3256 class GCLabBitMap: public BitMap { | |
3257 private: | |
3258 ConcurrentMark* _cm; | |
3259 | |
3260 int _shifter; | |
3261 size_t _bitmap_word_covers_words; | |
3262 | |
3263 // beginning of the heap | |
3264 HeapWord* _heap_start; | |
3265 | |
3266 // this is the actual start of the GCLab | |
3267 HeapWord* _real_start_word; | |
3268 | |
3269 // this is the actual end of the GCLab | |
3270 HeapWord* _real_end_word; | |
3271 | |
3272 // this is the first word, possibly located before the actual start | |
3273 // of the GCLab, that corresponds to the first bit of the bitmap | |
3274 HeapWord* _start_word; | |
3275 | |
3276 // size of a GCLab in words | |
3277 size_t _gclab_word_size; | |
3278 | |
3279 static int shifter() { | |
3280 return MinObjAlignment - 1; | |
3281 } | |
3282 | |
3283 // how many heap words does a single bitmap word corresponds to? | |
3284 static size_t bitmap_word_covers_words() { | |
3285 return BitsPerWord << shifter(); | |
3286 } | |
3287 | |
3288 static size_t gclab_word_size() { | |
3289 return ParallelGCG1AllocBufferSize / HeapWordSize; | |
3290 } | |
3291 | |
3292 static size_t bitmap_size_in_bits() { | |
3293 size_t bits_in_bitmap = gclab_word_size() >> shifter(); | |
3294 // We are going to ensure that the beginning of a word in this | |
3295 // bitmap also corresponds to the beginning of a word in the | |
3296 // global marking bitmap. To handle the case where a GCLab | |
3297 // starts from the middle of the bitmap, we need to add enough | |
3298 // space (i.e. up to a bitmap word) to ensure that we have | |
3299 // enough bits in the bitmap. | |
3300 return bits_in_bitmap + BitsPerWord - 1; | |
3301 } | |
3302 public: | |
3303 GCLabBitMap(HeapWord* heap_start) | |
3304 : BitMap(bitmap_size_in_bits()), | |
3305 _cm(G1CollectedHeap::heap()->concurrent_mark()), | |
3306 _shifter(shifter()), | |
3307 _bitmap_word_covers_words(bitmap_word_covers_words()), | |
3308 _heap_start(heap_start), | |
3309 _gclab_word_size(gclab_word_size()), | |
3310 _real_start_word(NULL), | |
3311 _real_end_word(NULL), | |
3312 _start_word(NULL) | |
3313 { | |
3314 guarantee( size_in_words() >= bitmap_size_in_words(), | |
3315 "just making sure"); | |
3316 } | |
3317 | |
3318 inline unsigned heapWordToOffset(HeapWord* addr) { | |
3319 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter; | |
3320 assert(offset < size(), "offset should be within bounds"); | |
3321 return offset; | |
3322 } | |
3323 | |
3324 inline HeapWord* offsetToHeapWord(size_t offset) { | |
3325 HeapWord* addr = _start_word + (offset << _shifter); | |
3326 assert(_real_start_word <= addr && addr < _real_end_word, "invariant"); | |
3327 return addr; | |
3328 } | |
3329 | |
3330 bool fields_well_formed() { | |
3331 bool ret1 = (_real_start_word == NULL) && | |
3332 (_real_end_word == NULL) && | |
3333 (_start_word == NULL); | |
3334 if (ret1) | |
3335 return true; | |
3336 | |
3337 bool ret2 = _real_start_word >= _start_word && | |
3338 _start_word < _real_end_word && | |
3339 (_real_start_word + _gclab_word_size) == _real_end_word && | |
3340 (_start_word + _gclab_word_size + _bitmap_word_covers_words) | |
3341 > _real_end_word; | |
3342 return ret2; | |
3343 } | |
3344 | |
3345 inline bool mark(HeapWord* addr) { | |
3346 guarantee(use_local_bitmaps, "invariant"); | |
3347 assert(fields_well_formed(), "invariant"); | |
3348 | |
3349 if (addr >= _real_start_word && addr < _real_end_word) { | |
3350 assert(!isMarked(addr), "should not have already been marked"); | |
3351 | |
3352 // first mark it on the bitmap | |
3353 at_put(heapWordToOffset(addr), true); | |
3354 | |
3355 return true; | |
3356 } else { | |
3357 return false; | |
3358 } | |
3359 } | |
3360 | |
3361 inline bool isMarked(HeapWord* addr) { | |
3362 guarantee(use_local_bitmaps, "invariant"); | |
3363 assert(fields_well_formed(), "invariant"); | |
3364 | |
3365 return at(heapWordToOffset(addr)); | |
3366 } | |
3367 | |
3368 void set_buffer(HeapWord* start) { | |
3369 guarantee(use_local_bitmaps, "invariant"); | |
3370 clear(); | |
3371 | |
3372 assert(start != NULL, "invariant"); | |
3373 _real_start_word = start; | |
3374 _real_end_word = start + _gclab_word_size; | |
3375 | |
3376 size_t diff = | |
3377 pointer_delta(start, _heap_start) % _bitmap_word_covers_words; | |
3378 _start_word = start - diff; | |
3379 | |
3380 assert(fields_well_formed(), "invariant"); | |
3381 } | |
3382 | |
3383 #ifndef PRODUCT | |
3384 void verify() { | |
3385 // verify that the marks have been propagated | |
3386 GCLabBitMapClosure cl(_cm, this); | |
3387 iterate(&cl); | |
3388 } | |
3389 #endif // PRODUCT | |
3390 | |
3391 void retire() { | |
3392 guarantee(use_local_bitmaps, "invariant"); | |
3393 assert(fields_well_formed(), "invariant"); | |
3394 | |
3395 if (_start_word != NULL) { | |
3396 CMBitMap* mark_bitmap = _cm->nextMarkBitMap(); | |
3397 | |
3398 // this means that the bitmap was set up for the GCLab | |
3399 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant"); | |
3400 | |
3401 mark_bitmap->mostly_disjoint_range_union(this, | |
3402 0, // always start from the start of the bitmap | |
3403 _start_word, | |
3404 size_in_words()); | |
3405 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); | |
3406 | |
3407 #ifndef PRODUCT | |
3408 if (use_local_bitmaps && verify_local_bitmaps) | |
3409 verify(); | |
3410 #endif // PRODUCT | |
3411 } else { | |
3412 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant"); | |
3413 } | |
3414 } | |
3415 | |
3416 static size_t bitmap_size_in_words() { | |
3417 return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord; | |
3418 } | |
3419 }; | |
3420 | |
3421 #ifndef PRODUCT | |
3422 | |
3423 bool GCLabBitMapClosure::do_bit(size_t offset) { | |
3424 HeapWord* addr = _bitmap->offsetToHeapWord(offset); | |
3425 guarantee(_cm->isMarked(oop(addr)), "it should be!"); | |
3426 return true; | |
3427 } | |
3428 | |
3429 #endif // PRODUCT | |
3430 | |
3431 class G1ParGCAllocBuffer: public ParGCAllocBuffer { | |
3432 private: | |
3433 bool _retired; | |
3434 bool _during_marking; | |
3435 GCLabBitMap _bitmap; | |
3436 | |
3437 public: | |
3438 G1ParGCAllocBuffer() : | |
3439 ParGCAllocBuffer(ParallelGCG1AllocBufferSize / HeapWordSize), | |
3440 _during_marking(G1CollectedHeap::heap()->mark_in_progress()), | |
3441 _bitmap(G1CollectedHeap::heap()->reserved_region().start()), | |
3442 _retired(false) | |
3443 { } | |
3444 | |
3445 inline bool mark(HeapWord* addr) { | |
3446 guarantee(use_local_bitmaps, "invariant"); | |
3447 assert(_during_marking, "invariant"); | |
3448 return _bitmap.mark(addr); | |
3449 } | |
3450 | |
3451 inline void set_buf(HeapWord* buf) { | |
3452 if (use_local_bitmaps && _during_marking) | |
3453 _bitmap.set_buffer(buf); | |
3454 ParGCAllocBuffer::set_buf(buf); | |
3455 _retired = false; | |
3456 } | |
3457 | |
3458 inline void retire(bool end_of_gc, bool retain) { | |
3459 if (_retired) | |
3460 return; | |
3461 if (use_local_bitmaps && _during_marking) { | |
3462 _bitmap.retire(); | |
3463 } | |
3464 ParGCAllocBuffer::retire(end_of_gc, retain); | |
3465 _retired = true; | |
3466 } | |
3467 }; | |
3468 | |
3469 | |
3470 class G1ParScanThreadState : public StackObj { | |
3471 protected: | |
3472 G1CollectedHeap* _g1h; | |
3473 RefToScanQueue* _refs; | |
3474 | |
3475 typedef GrowableArray<oop*> OverflowQueue; | |
3476 OverflowQueue* _overflowed_refs; | |
3477 | |
3478 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount]; | |
3479 | |
3480 size_t _alloc_buffer_waste; | |
3481 size_t _undo_waste; | |
3482 | |
3483 OopsInHeapRegionClosure* _evac_failure_cl; | |
3484 G1ParScanHeapEvacClosure* _evac_cl; | |
3485 G1ParScanPartialArrayClosure* _partial_scan_cl; | |
3486 | |
3487 int _hash_seed; | |
3488 int _queue_num; | |
3489 | |
3490 int _term_attempts; | |
3491 #if G1_DETAILED_STATS | |
3492 int _pushes, _pops, _steals, _steal_attempts; | |
3493 int _overflow_pushes; | |
3494 #endif | |
3495 | |
3496 double _start; | |
3497 double _start_strong_roots; | |
3498 double _strong_roots_time; | |
3499 double _start_term; | |
3500 double _term_time; | |
3501 | |
3502 // Map from young-age-index (0 == not young, 1 is youngest) to | |
3503 // surviving words. base is what we get back from the malloc call | |
3504 size_t* _surviving_young_words_base; | |
3505 // this points into the array, as we use the first few entries for padding | |
3506 size_t* _surviving_young_words; | |
3507 | |
3508 #define PADDING_ELEM_NUM (64 / sizeof(size_t)) | |
3509 | |
3510 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } | |
3511 | |
3512 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } | |
3513 | |
3514 public: | |
3515 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) | |
3516 : _g1h(g1h), | |
3517 _refs(g1h->task_queue(queue_num)), | |
3518 _hash_seed(17), _queue_num(queue_num), | |
3519 _term_attempts(0), | |
3520 #if G1_DETAILED_STATS | |
3521 _pushes(0), _pops(0), _steals(0), | |
3522 _steal_attempts(0), _overflow_pushes(0), | |
3523 #endif | |
3524 _strong_roots_time(0), _term_time(0), | |
3525 _alloc_buffer_waste(0), _undo_waste(0) | |
3526 { | |
3527 // we allocate G1YoungSurvRateNumRegions plus one entries, since | |
3528 // we "sacrifice" entry 0 to keep track of surviving bytes for | |
3529 // non-young regions (where the age is -1) | |
3530 // We also add a few elements at the beginning and at the end in | |
3531 // an attempt to eliminate cache contention | |
3532 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); | |
3533 size_t array_length = PADDING_ELEM_NUM + | |
3534 real_length + | |
3535 PADDING_ELEM_NUM; | |
3536 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); | |
3537 if (_surviving_young_words_base == NULL) | |
3538 vm_exit_out_of_memory(array_length * sizeof(size_t), | |
3539 "Not enough space for young surv histo."); | |
3540 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; | |
3541 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); | |
3542 | |
3543 _overflowed_refs = new OverflowQueue(10); | |
3544 | |
3545 _start = os::elapsedTime(); | |
3546 } | |
3547 | |
3548 ~G1ParScanThreadState() { | |
3549 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); | |
3550 } | |
3551 | |
3552 RefToScanQueue* refs() { return _refs; } | |
3553 OverflowQueue* overflowed_refs() { return _overflowed_refs; } | |
3554 | |
3555 inline G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { | |
3556 return &_alloc_buffers[purpose]; | |
3557 } | |
3558 | |
3559 size_t alloc_buffer_waste() { return _alloc_buffer_waste; } | |
3560 size_t undo_waste() { return _undo_waste; } | |
3561 | |
3562 void push_on_queue(oop* ref) { | |
3563 if (!refs()->push(ref)) { | |
3564 overflowed_refs()->push(ref); | |
3565 IF_G1_DETAILED_STATS(note_overflow_push()); | |
3566 } else { | |
3567 IF_G1_DETAILED_STATS(note_push()); | |
3568 } | |
3569 } | |
3570 | |
3571 void pop_from_queue(oop*& ref) { | |
3572 if (!refs()->pop_local(ref)) { | |
3573 ref = NULL; | |
3574 } else { | |
3575 IF_G1_DETAILED_STATS(note_pop()); | |
3576 } | |
3577 } | |
3578 | |
3579 void pop_from_overflow_queue(oop*& ref) { | |
3580 ref = overflowed_refs()->pop(); | |
3581 } | |
3582 | |
3583 int refs_to_scan() { return refs()->size(); } | |
3584 int overflowed_refs_to_scan() { return overflowed_refs()->length(); } | |
3585 | |
3586 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { | |
3587 | |
3588 HeapWord* obj = NULL; | |
3589 if (word_sz * 100 < | |
3590 (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) * | |
3591 ParallelGCBufferWastePct) { | |
3592 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); | |
3593 add_to_alloc_buffer_waste(alloc_buf->words_remaining()); | |
3594 alloc_buf->retire(false, false); | |
3595 | |
3596 HeapWord* buf = | |
3597 _g1h->par_allocate_during_gc(purpose, ParallelGCG1AllocBufferSize / HeapWordSize); | |
3598 if (buf == NULL) return NULL; // Let caller handle allocation failure. | |
3599 // Otherwise. | |
3600 alloc_buf->set_buf(buf); | |
3601 | |
3602 obj = alloc_buf->allocate(word_sz); | |
3603 assert(obj != NULL, "buffer was definitely big enough..."); | |
3604 } | |
3605 else { | |
3606 obj = _g1h->par_allocate_during_gc(purpose, word_sz); | |
3607 } | |
3608 return obj; | |
3609 } | |
3610 | |
3611 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { | |
3612 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); | |
3613 if (obj != NULL) return obj; | |
3614 return allocate_slow(purpose, word_sz); | |
3615 } | |
3616 | |
3617 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { | |
3618 if (alloc_buffer(purpose)->contains(obj)) { | |
3619 guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1), | |
3620 "should contain whole object"); | |
3621 alloc_buffer(purpose)->undo_allocation(obj, word_sz); | |
3622 } | |
3623 else { | |
3624 SharedHeap::fill_region_with_object(MemRegion(obj, word_sz)); | |
3625 add_to_undo_waste(word_sz); | |
3626 } | |
3627 } | |
3628 | |
3629 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { | |
3630 _evac_failure_cl = evac_failure_cl; | |
3631 } | |
3632 OopsInHeapRegionClosure* evac_failure_closure() { | |
3633 return _evac_failure_cl; | |
3634 } | |
3635 | |
3636 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) { | |
3637 _evac_cl = evac_cl; | |
3638 } | |
3639 | |
3640 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) { | |
3641 _partial_scan_cl = partial_scan_cl; | |
3642 } | |
3643 | |
3644 int* hash_seed() { return &_hash_seed; } | |
3645 int queue_num() { return _queue_num; } | |
3646 | |
3647 int term_attempts() { return _term_attempts; } | |
3648 void note_term_attempt() { _term_attempts++; } | |
3649 | |
3650 #if G1_DETAILED_STATS | |
3651 int pushes() { return _pushes; } | |
3652 int pops() { return _pops; } | |
3653 int steals() { return _steals; } | |
3654 int steal_attempts() { return _steal_attempts; } | |
3655 int overflow_pushes() { return _overflow_pushes; } | |
3656 | |
3657 void note_push() { _pushes++; } | |
3658 void note_pop() { _pops++; } | |
3659 void note_steal() { _steals++; } | |
3660 void note_steal_attempt() { _steal_attempts++; } | |
3661 void note_overflow_push() { _overflow_pushes++; } | |
3662 #endif | |
3663 | |
3664 void start_strong_roots() { | |
3665 _start_strong_roots = os::elapsedTime(); | |
3666 } | |
3667 void end_strong_roots() { | |
3668 _strong_roots_time += (os::elapsedTime() - _start_strong_roots); | |
3669 } | |
3670 double strong_roots_time() { return _strong_roots_time; } | |
3671 | |
3672 void start_term_time() { | |
3673 note_term_attempt(); | |
3674 _start_term = os::elapsedTime(); | |
3675 } | |
3676 void end_term_time() { | |
3677 _term_time += (os::elapsedTime() - _start_term); | |
3678 } | |
3679 double term_time() { return _term_time; } | |
3680 | |
3681 double elapsed() { | |
3682 return os::elapsedTime() - _start; | |
3683 } | |
3684 | |
3685 size_t* surviving_young_words() { | |
3686 // We add on to hide entry 0 which accumulates surviving words for | |
3687 // age -1 regions (i.e. non-young ones) | |
3688 return _surviving_young_words; | |
3689 } | |
3690 | |
3691 void retire_alloc_buffers() { | |
3692 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
3693 size_t waste = _alloc_buffers[ap].words_remaining(); | |
3694 add_to_alloc_buffer_waste(waste); | |
3695 _alloc_buffers[ap].retire(true, false); | |
3696 } | |
3697 } | |
3698 | |
3699 void trim_queue() { | |
3700 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) { | |
3701 oop *ref_to_scan = NULL; | |
3702 if (overflowed_refs_to_scan() == 0) { | |
3703 pop_from_queue(ref_to_scan); | |
3704 } else { | |
3705 pop_from_overflow_queue(ref_to_scan); | |
3706 } | |
3707 if (ref_to_scan != NULL) { | |
3708 if ((intptr_t)ref_to_scan & G1_PARTIAL_ARRAY_MASK) { | |
3709 _partial_scan_cl->do_oop_nv(ref_to_scan); | |
3710 } else { | |
3711 // Note: we can use "raw" versions of "region_containing" because | |
3712 // "obj_to_scan" is definitely in the heap, and is not in a | |
3713 // humongous region. | |
3714 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); | |
3715 _evac_cl->set_region(r); | |
3716 _evac_cl->do_oop_nv(ref_to_scan); | |
3717 } | |
3718 } | |
3719 } | |
3720 } | |
3721 }; | |
3722 | |
3723 | |
3724 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : | |
3725 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), | |
3726 _par_scan_state(par_scan_state) { } | |
3727 | |
3728 // This closure is applied to the fields of the objects that have just been copied. | |
3729 // Should probably be made inline and moved in g1OopClosures.inline.hpp. | |
3730 void G1ParScanClosure::do_oop_nv(oop* p) { | |
3731 oop obj = *p; | |
3732 if (obj != NULL) { | |
3733 if (_g1->obj_in_cs(obj)) { | |
3734 if (obj->is_forwarded()) { | |
3735 *p = obj->forwardee(); | |
3736 } else { | |
3737 _par_scan_state->push_on_queue(p); | |
3738 return; | |
3739 } | |
3740 } | |
3741 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); | |
3742 } | |
3743 } | |
3744 | |
3745 void G1ParCopyHelper::mark_forwardee(oop* p) { | |
3746 // This is called _after_ do_oop_work has been called, hence after | |
3747 // the object has been relocated to its new location and *p points | |
3748 // to its new location. | |
3749 | |
3750 oop thisOop = *p; | |
3751 if (thisOop != NULL) { | |
3752 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)), | |
3753 "shouldn't still be in the CSet if evacuation didn't fail."); | |
3754 HeapWord* addr = (HeapWord*)thisOop; | |
3755 if (_g1->is_in_g1_reserved(addr)) | |
3756 _cm->grayRoot(oop(addr)); | |
3757 } | |
3758 } | |
3759 | |
3760 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { | |
3761 size_t word_sz = old->size(); | |
3762 HeapRegion* from_region = _g1->heap_region_containing_raw(old); | |
3763 // +1 to make the -1 indexes valid... | |
3764 int young_index = from_region->young_index_in_cset()+1; | |
3765 assert( (from_region->is_young() && young_index > 0) || | |
3766 (!from_region->is_young() && young_index == 0), "invariant" ); | |
3767 G1CollectorPolicy* g1p = _g1->g1_policy(); | |
3768 markOop m = old->mark(); | |
3769 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, m->age(), | |
3770 word_sz); | |
3771 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); | |
3772 oop obj = oop(obj_ptr); | |
3773 | |
3774 if (obj_ptr == NULL) { | |
3775 // This will either forward-to-self, or detect that someone else has | |
3776 // installed a forwarding pointer. | |
3777 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); | |
3778 return _g1->handle_evacuation_failure_par(cl, old); | |
3779 } | |
3780 | |
3781 oop forward_ptr = old->forward_to_atomic(obj); | |
3782 if (forward_ptr == NULL) { | |
3783 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); | |
3784 obj->set_mark(m); | |
3785 if (g1p->track_object_age(alloc_purpose)) { | |
3786 obj->incr_age(); | |
3787 } | |
3788 // preserve "next" mark bit | |
3789 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { | |
3790 if (!use_local_bitmaps || | |
3791 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { | |
3792 // if we couldn't mark it on the local bitmap (this happens when | |
3793 // the object was not allocated in the GCLab), we have to bite | |
3794 // the bullet and do the standard parallel mark | |
3795 _cm->markAndGrayObjectIfNecessary(obj); | |
3796 } | |
3797 #if 1 | |
3798 if (_g1->isMarkedNext(old)) { | |
3799 _cm->nextMarkBitMap()->parClear((HeapWord*)old); | |
3800 } | |
3801 #endif | |
3802 } | |
3803 | |
3804 size_t* surv_young_words = _par_scan_state->surviving_young_words(); | |
3805 surv_young_words[young_index] += word_sz; | |
3806 | |
3807 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { | |
3808 arrayOop(old)->set_length(0); | |
3809 _par_scan_state->push_on_queue((oop*) ((intptr_t)old | G1_PARTIAL_ARRAY_MASK)); | |
3810 } else { | |
3811 _scanner->set_region(_g1->heap_region_containing(obj)); | |
3812 obj->oop_iterate_backwards(_scanner); | |
3813 } | |
3814 } else { | |
3815 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); | |
3816 obj = forward_ptr; | |
3817 } | |
3818 return obj; | |
3819 } | |
3820 | |
3821 template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> | |
3822 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_forwardee>::do_oop_work(oop* p) { | |
3823 oop obj = *p; | |
3824 assert(barrier != G1BarrierRS || obj != NULL, | |
3825 "Precondition: G1BarrierRS implies obj is nonNull"); | |
3826 | |
3827 if (obj != NULL) { | |
3828 if (_g1->obj_in_cs(obj)) { | |
3829 #if G1_REM_SET_LOGGING | |
3830 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" into CS.", | |
3831 p, (void*) obj); | |
3832 #endif | |
3833 if (obj->is_forwarded()) { | |
3834 *p = obj->forwardee(); | |
3835 } else { | |
3836 *p = copy_to_survivor_space(obj); | |
3837 } | |
3838 // When scanning the RS, we only care about objs in CS. | |
3839 if (barrier == G1BarrierRS) { | |
3840 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); | |
3841 } | |
3842 } | |
3843 // When scanning moved objs, must look at all oops. | |
3844 if (barrier == G1BarrierEvac) { | |
3845 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); | |
3846 } | |
3847 | |
3848 if (do_gen_barrier) { | |
3849 par_do_barrier(p); | |
3850 } | |
3851 } | |
3852 } | |
3853 | |
3854 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); | |
3855 | |
3856 template <class T> void G1ParScanPartialArrayClosure::process_array_chunk( | |
3857 oop obj, int start, int end) { | |
3858 // process our set of indices (include header in first chunk) | |
3859 assert(start < end, "invariant"); | |
3860 T* const base = (T*)objArrayOop(obj)->base(); | |
3861 T* const start_addr = base + start; | |
3862 T* const end_addr = base + end; | |
3863 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); | |
3864 _scanner.set_region(_g1->heap_region_containing(obj)); | |
3865 obj->oop_iterate(&_scanner, mr); | |
3866 } | |
3867 | |
3868 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) { | |
3869 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops"); | |
3870 oop old = oop((intptr_t)p & ~G1_PARTIAL_ARRAY_MASK); | |
3871 assert(old->is_objArray(), "must be obj array"); | |
3872 assert(old->is_forwarded(), "must be forwarded"); | |
3873 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); | |
3874 | |
3875 objArrayOop obj = objArrayOop(old->forwardee()); | |
3876 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); | |
3877 // Process ParGCArrayScanChunk elements now | |
3878 // and push the remainder back onto queue | |
3879 int start = arrayOop(old)->length(); | |
3880 int end = obj->length(); | |
3881 int remainder = end - start; | |
3882 assert(start <= end, "just checking"); | |
3883 if (remainder > 2 * ParGCArrayScanChunk) { | |
3884 // Test above combines last partial chunk with a full chunk | |
3885 end = start + ParGCArrayScanChunk; | |
3886 arrayOop(old)->set_length(end); | |
3887 // Push remainder. | |
3888 _par_scan_state->push_on_queue((oop*) ((intptr_t) old | G1_PARTIAL_ARRAY_MASK)); | |
3889 } else { | |
3890 // Restore length so that the heap remains parsable in | |
3891 // case of evacuation failure. | |
3892 arrayOop(old)->set_length(end); | |
3893 } | |
3894 | |
3895 // process our set of indices (include header in first chunk) | |
3896 process_array_chunk<oop>(obj, start, end); | |
3897 oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr<oop>(start); | |
3898 oop* end_addr = (oop*)(obj->base()) + end; // obj_at_addr(end) asserts end < length | |
3899 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); | |
3900 _scanner.set_region(_g1->heap_region_containing(obj)); | |
3901 obj->oop_iterate(&_scanner, mr); | |
3902 } | |
3903 | |
3904 int G1ScanAndBalanceClosure::_nq = 0; | |
3905 | |
3906 class G1ParEvacuateFollowersClosure : public VoidClosure { | |
3907 protected: | |
3908 G1CollectedHeap* _g1h; | |
3909 G1ParScanThreadState* _par_scan_state; | |
3910 RefToScanQueueSet* _queues; | |
3911 ParallelTaskTerminator* _terminator; | |
3912 | |
3913 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } | |
3914 RefToScanQueueSet* queues() { return _queues; } | |
3915 ParallelTaskTerminator* terminator() { return _terminator; } | |
3916 | |
3917 public: | |
3918 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | |
3919 G1ParScanThreadState* par_scan_state, | |
3920 RefToScanQueueSet* queues, | |
3921 ParallelTaskTerminator* terminator) | |
3922 : _g1h(g1h), _par_scan_state(par_scan_state), | |
3923 _queues(queues), _terminator(terminator) {} | |
3924 | |
3925 void do_void() { | |
3926 G1ParScanThreadState* pss = par_scan_state(); | |
3927 while (true) { | |
3928 oop* ref_to_scan; | |
3929 pss->trim_queue(); | |
3930 IF_G1_DETAILED_STATS(pss->note_steal_attempt()); | |
3931 if (queues()->steal(pss->queue_num(), | |
3932 pss->hash_seed(), | |
3933 ref_to_scan)) { | |
3934 IF_G1_DETAILED_STATS(pss->note_steal()); | |
3935 pss->push_on_queue(ref_to_scan); | |
3936 continue; | |
3937 } | |
3938 pss->start_term_time(); | |
3939 if (terminator()->offer_termination()) break; | |
3940 pss->end_term_time(); | |
3941 } | |
3942 pss->end_term_time(); | |
3943 pss->retire_alloc_buffers(); | |
3944 } | |
3945 }; | |
3946 | |
3947 class G1ParTask : public AbstractGangTask { | |
3948 protected: | |
3949 G1CollectedHeap* _g1h; | |
3950 RefToScanQueueSet *_queues; | |
3951 ParallelTaskTerminator _terminator; | |
3952 | |
3953 Mutex _stats_lock; | |
3954 Mutex* stats_lock() { return &_stats_lock; } | |
3955 | |
3956 size_t getNCards() { | |
3957 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
3958 / G1BlockOffsetSharedArray::N_bytes; | |
3959 } | |
3960 | |
3961 public: | |
3962 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) | |
3963 : AbstractGangTask("G1 collection"), | |
3964 _g1h(g1h), | |
3965 _queues(task_queues), | |
3966 _terminator(workers, _queues), | |
3967 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true) | |
3968 {} | |
3969 | |
3970 RefToScanQueueSet* queues() { return _queues; } | |
3971 | |
3972 RefToScanQueue *work_queue(int i) { | |
3973 return queues()->queue(i); | |
3974 } | |
3975 | |
3976 void work(int i) { | |
3977 ResourceMark rm; | |
3978 HandleMark hm; | |
3979 | |
3980 G1ParScanThreadState pss(_g1h, i); | |
3981 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); | |
3982 G1ParScanHeapEvacClosure evac_failure_cl(_g1h, &pss); | |
3983 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); | |
3984 | |
3985 pss.set_evac_closure(&scan_evac_cl); | |
3986 pss.set_evac_failure_closure(&evac_failure_cl); | |
3987 pss.set_partial_scan_closure(&partial_scan_cl); | |
3988 | |
3989 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); | |
3990 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); | |
3991 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); | |
3992 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); | |
3993 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); | |
3994 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); | |
3995 | |
3996 OopsInHeapRegionClosure *scan_root_cl; | |
3997 OopsInHeapRegionClosure *scan_perm_cl; | |
3998 OopsInHeapRegionClosure *scan_so_cl; | |
3999 | |
4000 if (_g1h->g1_policy()->should_initiate_conc_mark()) { | |
4001 scan_root_cl = &scan_mark_root_cl; | |
4002 scan_perm_cl = &scan_mark_perm_cl; | |
4003 scan_so_cl = &scan_mark_heap_rs_cl; | |
4004 } else { | |
4005 scan_root_cl = &only_scan_root_cl; | |
4006 scan_perm_cl = &only_scan_perm_cl; | |
4007 scan_so_cl = &only_scan_heap_rs_cl; | |
4008 } | |
4009 | |
4010 pss.start_strong_roots(); | |
4011 _g1h->g1_process_strong_roots(/* not collecting perm */ false, | |
4012 SharedHeap::SO_AllClasses, | |
4013 scan_root_cl, | |
4014 &only_scan_heap_rs_cl, | |
4015 scan_so_cl, | |
4016 scan_perm_cl, | |
4017 i); | |
4018 pss.end_strong_roots(); | |
4019 { | |
4020 double start = os::elapsedTime(); | |
4021 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | |
4022 evac.do_void(); | |
4023 double elapsed_ms = (os::elapsedTime()-start)*1000.0; | |
4024 double term_ms = pss.term_time()*1000.0; | |
4025 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); | |
4026 _g1h->g1_policy()->record_termination_time(i, term_ms); | |
4027 } | |
4028 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); | |
4029 | |
4030 // Clean up any par-expanded rem sets. | |
4031 HeapRegionRemSet::par_cleanup(); | |
4032 | |
4033 MutexLocker x(stats_lock()); | |
4034 if (ParallelGCVerbose) { | |
4035 gclog_or_tty->print("Thread %d complete:\n", i); | |
4036 #if G1_DETAILED_STATS | |
4037 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", | |
4038 pss.pushes(), | |
4039 pss.pops(), | |
4040 pss.overflow_pushes(), | |
4041 pss.steals(), | |
4042 pss.steal_attempts()); | |
4043 #endif | |
4044 double elapsed = pss.elapsed(); | |
4045 double strong_roots = pss.strong_roots_time(); | |
4046 double term = pss.term_time(); | |
4047 gclog_or_tty->print(" Elapsed: %7.2f ms.\n" | |
4048 " Strong roots: %7.2f ms (%6.2f%%)\n" | |
4049 " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", | |
4050 elapsed * 1000.0, | |
4051 strong_roots * 1000.0, (strong_roots*100.0/elapsed), | |
4052 term * 1000.0, (term*100.0/elapsed), | |
4053 pss.term_attempts()); | |
4054 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); | |
4055 gclog_or_tty->print(" Waste: %8dK\n" | |
4056 " Alloc Buffer: %8dK\n" | |
4057 " Undo: %8dK\n", | |
4058 (total_waste * HeapWordSize) / K, | |
4059 (pss.alloc_buffer_waste() * HeapWordSize) / K, | |
4060 (pss.undo_waste() * HeapWordSize) / K); | |
4061 } | |
4062 | |
4063 assert(pss.refs_to_scan() == 0, "Task queue should be empty"); | |
4064 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); | |
4065 } | |
4066 }; | |
4067 | |
4068 // *** Common G1 Evacuation Stuff | |
4069 | |
4070 class G1CountClosure: public OopsInHeapRegionClosure { | |
4071 public: | |
4072 int n; | |
4073 G1CountClosure() : n(0) {} | |
4074 void do_oop(narrowOop* p) { | |
4075 guarantee(false, "NYI"); | |
4076 } | |
4077 void do_oop(oop* p) { | |
4078 oop obj = *p; | |
4079 assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj), | |
4080 "Rem set closure called on non-rem-set pointer."); | |
4081 n++; | |
4082 } | |
4083 }; | |
4084 | |
4085 static G1CountClosure count_closure; | |
4086 | |
4087 void | |
4088 G1CollectedHeap:: | |
4089 g1_process_strong_roots(bool collecting_perm_gen, | |
4090 SharedHeap::ScanningOption so, | |
4091 OopClosure* scan_non_heap_roots, | |
4092 OopsInHeapRegionClosure* scan_rs, | |
4093 OopsInHeapRegionClosure* scan_so, | |
4094 OopsInGenClosure* scan_perm, | |
4095 int worker_i) { | |
4096 // First scan the strong roots, including the perm gen. | |
4097 double ext_roots_start = os::elapsedTime(); | |
4098 double closure_app_time_sec = 0.0; | |
4099 | |
4100 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | |
4101 BufferingOopsInGenClosure buf_scan_perm(scan_perm); | |
4102 buf_scan_perm.set_generation(perm_gen()); | |
4103 | |
4104 process_strong_roots(collecting_perm_gen, so, | |
4105 &buf_scan_non_heap_roots, | |
4106 &buf_scan_perm); | |
4107 // Finish up any enqueued closure apps. | |
4108 buf_scan_non_heap_roots.done(); | |
4109 buf_scan_perm.done(); | |
4110 double ext_roots_end = os::elapsedTime(); | |
4111 g1_policy()->reset_obj_copy_time(worker_i); | |
4112 double obj_copy_time_sec = | |
4113 buf_scan_non_heap_roots.closure_app_seconds() + | |
4114 buf_scan_perm.closure_app_seconds(); | |
4115 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | |
4116 double ext_root_time_ms = | |
4117 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; | |
4118 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); | |
4119 | |
4120 // Scan strong roots in mark stack. | |
4121 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { | |
4122 concurrent_mark()->oops_do(scan_non_heap_roots); | |
4123 } | |
4124 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; | |
4125 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); | |
4126 | |
4127 // XXX What should this be doing in the parallel case? | |
4128 g1_policy()->record_collection_pause_end_CH_strong_roots(); | |
4129 if (G1VerifyRemSet) { | |
4130 // :::: FIXME :::: | |
4131 // The stupid remembered set doesn't know how to filter out dead | |
4132 // objects, which the smart one does, and so when it is created | |
4133 // and then compared the number of entries in each differs and | |
4134 // the verification code fails. | |
4135 guarantee(false, "verification code is broken, see note"); | |
4136 | |
4137 // Let's make sure that the current rem set agrees with the stupidest | |
4138 // one possible! | |
4139 bool refs_enabled = ref_processor()->discovery_enabled(); | |
4140 if (refs_enabled) ref_processor()->disable_discovery(); | |
4141 StupidG1RemSet stupid(this); | |
4142 count_closure.n = 0; | |
4143 stupid.oops_into_collection_set_do(&count_closure, worker_i); | |
4144 int stupid_n = count_closure.n; | |
4145 count_closure.n = 0; | |
4146 g1_rem_set()->oops_into_collection_set_do(&count_closure, worker_i); | |
4147 guarantee(count_closure.n == stupid_n, "Old and new rem sets differ."); | |
4148 gclog_or_tty->print_cr("\nFound %d pointers in heap RS.", count_closure.n); | |
4149 if (refs_enabled) ref_processor()->enable_discovery(); | |
4150 } | |
4151 if (scan_so != NULL) { | |
4152 scan_scan_only_set(scan_so, worker_i); | |
4153 } | |
4154 // Now scan the complement of the collection set. | |
4155 if (scan_rs != NULL) { | |
4156 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); | |
4157 } | |
4158 // Finish with the ref_processor roots. | |
4159 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | |
4160 ref_processor()->oops_do(scan_non_heap_roots); | |
4161 } | |
4162 g1_policy()->record_collection_pause_end_G1_strong_roots(); | |
4163 _process_strong_tasks->all_tasks_completed(); | |
4164 } | |
4165 | |
4166 void | |
4167 G1CollectedHeap::scan_scan_only_region(HeapRegion* r, | |
4168 OopsInHeapRegionClosure* oc, | |
4169 int worker_i) { | |
4170 HeapWord* startAddr = r->bottom(); | |
4171 HeapWord* endAddr = r->used_region().end(); | |
4172 | |
4173 oc->set_region(r); | |
4174 | |
4175 HeapWord* p = r->bottom(); | |
4176 HeapWord* t = r->top(); | |
4177 guarantee( p == r->next_top_at_mark_start(), "invariant" ); | |
4178 while (p < t) { | |
4179 oop obj = oop(p); | |
4180 p += obj->oop_iterate(oc); | |
4181 } | |
4182 } | |
4183 | |
4184 void | |
4185 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc, | |
4186 int worker_i) { | |
4187 double start = os::elapsedTime(); | |
4188 | |
4189 BufferingOopsInHeapRegionClosure boc(oc); | |
4190 | |
4191 FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc); | |
4192 FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark()); | |
4193 | |
4194 OopsInHeapRegionClosure *foc; | |
4195 if (g1_policy()->should_initiate_conc_mark()) | |
4196 foc = &scan_and_mark; | |
4197 else | |
4198 foc = &scan_only; | |
4199 | |
4200 HeapRegion* hr; | |
4201 int n = 0; | |
4202 while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) { | |
4203 scan_scan_only_region(hr, foc, worker_i); | |
4204 ++n; | |
4205 } | |
4206 boc.done(); | |
4207 | |
4208 double closure_app_s = boc.closure_app_seconds(); | |
4209 g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0); | |
4210 double ms = (os::elapsedTime() - start - closure_app_s)*1000.0; | |
4211 g1_policy()->record_scan_only_time(worker_i, ms, n); | |
4212 } | |
4213 | |
4214 void | |
4215 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, | |
4216 OopClosure* non_root_closure) { | |
4217 SharedHeap::process_weak_roots(root_closure, non_root_closure); | |
4218 } | |
4219 | |
4220 | |
4221 class SaveMarksClosure: public HeapRegionClosure { | |
4222 public: | |
4223 bool doHeapRegion(HeapRegion* r) { | |
4224 r->save_marks(); | |
4225 return false; | |
4226 } | |
4227 }; | |
4228 | |
4229 void G1CollectedHeap::save_marks() { | |
4230 if (ParallelGCThreads == 0) { | |
4231 SaveMarksClosure sm; | |
4232 heap_region_iterate(&sm); | |
4233 } | |
4234 // We do this even in the parallel case | |
4235 perm_gen()->save_marks(); | |
4236 } | |
4237 | |
4238 void G1CollectedHeap::evacuate_collection_set() { | |
4239 set_evacuation_failed(false); | |
4240 | |
4241 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
4242 concurrent_g1_refine()->set_use_cache(false); | |
4243 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); | |
4244 | |
4245 set_par_threads(n_workers); | |
4246 G1ParTask g1_par_task(this, n_workers, _task_queues); | |
4247 | |
4248 init_for_evac_failure(NULL); | |
4249 | |
4250 change_strong_roots_parity(); // In preparation for parallel strong roots. | |
4251 rem_set()->prepare_for_younger_refs_iterate(true); | |
4252 double start_par = os::elapsedTime(); | |
4253 | |
4254 if (ParallelGCThreads > 0) { | |
4255 // The individual threads will set their evac-failure closures. | |
4256 workers()->run_task(&g1_par_task); | |
4257 } else { | |
4258 g1_par_task.work(0); | |
4259 } | |
4260 | |
4261 double par_time = (os::elapsedTime() - start_par) * 1000.0; | |
4262 g1_policy()->record_par_time(par_time); | |
4263 set_par_threads(0); | |
4264 // Is this the right thing to do here? We don't save marks | |
4265 // on individual heap regions when we allocate from | |
4266 // them in parallel, so this seems like the correct place for this. | |
4267 all_alloc_regions_note_end_of_copying(); | |
4268 { | |
4269 G1IsAliveClosure is_alive(this); | |
4270 G1KeepAliveClosure keep_alive(this); | |
4271 JNIHandles::weak_oops_do(&is_alive, &keep_alive); | |
4272 } | |
4273 | |
4274 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); | |
4275 concurrent_g1_refine()->set_use_cache(true); | |
4276 | |
4277 finalize_for_evac_failure(); | |
4278 | |
4279 // Must do this before removing self-forwarding pointers, which clears | |
4280 // the per-region evac-failure flags. | |
4281 concurrent_mark()->complete_marking_in_collection_set(); | |
4282 | |
4283 if (evacuation_failed()) { | |
4284 remove_self_forwarding_pointers(); | |
4285 | |
4286 if (PrintGCDetails) { | |
4287 gclog_or_tty->print(" (evacuation failed)"); | |
4288 } else if (PrintGC) { | |
4289 gclog_or_tty->print("--"); | |
4290 } | |
4291 } | |
4292 | |
4293 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
4294 } | |
4295 | |
4296 void G1CollectedHeap::free_region(HeapRegion* hr) { | |
4297 size_t pre_used = 0; | |
4298 size_t cleared_h_regions = 0; | |
4299 size_t freed_regions = 0; | |
4300 UncleanRegionList local_list; | |
4301 | |
4302 HeapWord* start = hr->bottom(); | |
4303 HeapWord* end = hr->prev_top_at_mark_start(); | |
4304 size_t used_bytes = hr->used(); | |
4305 size_t live_bytes = hr->max_live_bytes(); | |
4306 if (used_bytes > 0) { | |
4307 guarantee( live_bytes <= used_bytes, "invariant" ); | |
4308 } else { | |
4309 guarantee( live_bytes == 0, "invariant" ); | |
4310 } | |
4311 | |
4312 size_t garbage_bytes = used_bytes - live_bytes; | |
4313 if (garbage_bytes > 0) | |
4314 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); | |
4315 | |
4316 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, | |
4317 &local_list); | |
4318 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, | |
4319 &local_list); | |
4320 } | |
4321 | |
4322 void | |
4323 G1CollectedHeap::free_region_work(HeapRegion* hr, | |
4324 size_t& pre_used, | |
4325 size_t& cleared_h_regions, | |
4326 size_t& freed_regions, | |
4327 UncleanRegionList* list, | |
4328 bool par) { | |
4329 assert(!hr->popular(), "should not free popular regions"); | |
4330 pre_used += hr->used(); | |
4331 if (hr->isHumongous()) { | |
4332 assert(hr->startsHumongous(), | |
4333 "Only the start of a humongous region should be freed."); | |
4334 int ind = _hrs->find(hr); | |
4335 assert(ind != -1, "Should have an index."); | |
4336 // Clear the start region. | |
4337 hr->hr_clear(par, true /*clear_space*/); | |
4338 list->insert_before_head(hr); | |
4339 cleared_h_regions++; | |
4340 freed_regions++; | |
4341 // Clear any continued regions. | |
4342 ind++; | |
4343 while ((size_t)ind < n_regions()) { | |
4344 HeapRegion* hrc = _hrs->at(ind); | |
4345 if (!hrc->continuesHumongous()) break; | |
4346 // Otherwise, does continue the H region. | |
4347 assert(hrc->humongous_start_region() == hr, "Huh?"); | |
4348 hrc->hr_clear(par, true /*clear_space*/); | |
4349 cleared_h_regions++; | |
4350 freed_regions++; | |
4351 list->insert_before_head(hrc); | |
4352 ind++; | |
4353 } | |
4354 } else { | |
4355 hr->hr_clear(par, true /*clear_space*/); | |
4356 list->insert_before_head(hr); | |
4357 freed_regions++; | |
4358 // If we're using clear2, this should not be enabled. | |
4359 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); | |
4360 } | |
4361 } | |
4362 | |
4363 void G1CollectedHeap::finish_free_region_work(size_t pre_used, | |
4364 size_t cleared_h_regions, | |
4365 size_t freed_regions, | |
4366 UncleanRegionList* list) { | |
4367 if (list != NULL && list->sz() > 0) { | |
4368 prepend_region_list_on_unclean_list(list); | |
4369 } | |
4370 // Acquire a lock, if we're parallel, to update possibly-shared | |
4371 // variables. | |
4372 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; | |
4373 { | |
4374 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
4375 _summary_bytes_used -= pre_used; | |
4376 _num_humongous_regions -= (int) cleared_h_regions; | |
4377 _free_regions += freed_regions; | |
4378 } | |
4379 } | |
4380 | |
4381 | |
4382 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { | |
4383 while (list != NULL) { | |
4384 guarantee( list->is_young(), "invariant" ); | |
4385 | |
4386 HeapWord* bottom = list->bottom(); | |
4387 HeapWord* end = list->end(); | |
4388 MemRegion mr(bottom, end); | |
4389 ct_bs->dirty(mr); | |
4390 | |
4391 list = list->get_next_young_region(); | |
4392 } | |
4393 } | |
4394 | |
4395 void G1CollectedHeap::cleanUpCardTable() { | |
4396 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); | |
4397 double start = os::elapsedTime(); | |
4398 | |
4399 ct_bs->clear(_g1_committed); | |
4400 | |
4401 // now, redirty the cards of the scan-only and survivor regions | |
4402 // (it seemed faster to do it this way, instead of iterating over | |
4403 // all regions and then clearing / dirtying as approprite) | |
4404 dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region()); | |
4405 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); | |
4406 | |
4407 double elapsed = os::elapsedTime() - start; | |
4408 g1_policy()->record_clear_ct_time( elapsed * 1000.0); | |
4409 } | |
4410 | |
4411 | |
4412 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { | |
4413 // First do any popular regions. | |
4414 HeapRegion* hr; | |
4415 while ((hr = popular_region_to_evac()) != NULL) { | |
4416 evac_popular_region(hr); | |
4417 } | |
4418 // Now do heuristic pauses. | |
4419 if (g1_policy()->should_do_collection_pause(word_size)) { | |
4420 do_collection_pause(); | |
4421 } | |
4422 } | |
4423 | |
4424 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { | |
4425 double young_time_ms = 0.0; | |
4426 double non_young_time_ms = 0.0; | |
4427 | |
4428 G1CollectorPolicy* policy = g1_policy(); | |
4429 | |
4430 double start_sec = os::elapsedTime(); | |
4431 bool non_young = true; | |
4432 | |
4433 HeapRegion* cur = cs_head; | |
4434 int age_bound = -1; | |
4435 size_t rs_lengths = 0; | |
4436 | |
4437 while (cur != NULL) { | |
4438 if (non_young) { | |
4439 if (cur->is_young()) { | |
4440 double end_sec = os::elapsedTime(); | |
4441 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4442 non_young_time_ms += elapsed_ms; | |
4443 | |
4444 start_sec = os::elapsedTime(); | |
4445 non_young = false; | |
4446 } | |
4447 } else { | |
4448 if (!cur->is_on_free_list()) { | |
4449 double end_sec = os::elapsedTime(); | |
4450 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4451 young_time_ms += elapsed_ms; | |
4452 | |
4453 start_sec = os::elapsedTime(); | |
4454 non_young = true; | |
4455 } | |
4456 } | |
4457 | |
4458 rs_lengths += cur->rem_set()->occupied(); | |
4459 | |
4460 HeapRegion* next = cur->next_in_collection_set(); | |
4461 assert(cur->in_collection_set(), "bad CS"); | |
4462 cur->set_next_in_collection_set(NULL); | |
4463 cur->set_in_collection_set(false); | |
4464 | |
4465 if (cur->is_young()) { | |
4466 int index = cur->young_index_in_cset(); | |
4467 guarantee( index != -1, "invariant" ); | |
4468 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); | |
4469 size_t words_survived = _surviving_young_words[index]; | |
4470 cur->record_surv_words_in_group(words_survived); | |
4471 } else { | |
4472 int index = cur->young_index_in_cset(); | |
4473 guarantee( index == -1, "invariant" ); | |
4474 } | |
4475 | |
4476 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || | |
4477 (!cur->is_young() && cur->young_index_in_cset() == -1), | |
4478 "invariant" ); | |
4479 | |
4480 if (!cur->evacuation_failed()) { | |
4481 // And the region is empty. | |
4482 assert(!cur->is_empty(), | |
4483 "Should not have empty regions in a CS."); | |
4484 free_region(cur); | |
4485 } else { | |
4486 guarantee( !cur->is_scan_only(), "should not be scan only" ); | |
4487 cur->uninstall_surv_rate_group(); | |
4488 if (cur->is_young()) | |
4489 cur->set_young_index_in_cset(-1); | |
4490 cur->set_not_young(); | |
4491 cur->set_evacuation_failed(false); | |
4492 } | |
4493 cur = next; | |
4494 } | |
4495 | |
4496 policy->record_max_rs_lengths(rs_lengths); | |
4497 policy->cset_regions_freed(); | |
4498 | |
4499 double end_sec = os::elapsedTime(); | |
4500 double elapsed_ms = (end_sec - start_sec) * 1000.0; | |
4501 if (non_young) | |
4502 non_young_time_ms += elapsed_ms; | |
4503 else | |
4504 young_time_ms += elapsed_ms; | |
4505 | |
4506 policy->record_young_free_cset_time_ms(young_time_ms); | |
4507 policy->record_non_young_free_cset_time_ms(non_young_time_ms); | |
4508 } | |
4509 | |
4510 HeapRegion* | |
4511 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { | |
4512 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4513 HeapRegion* res = pop_unclean_region_list_locked(); | |
4514 if (res != NULL) { | |
4515 assert(!res->continuesHumongous() && | |
4516 res->zero_fill_state() != HeapRegion::Allocated, | |
4517 "Only free regions on unclean list."); | |
4518 if (zero_filled) { | |
4519 res->ensure_zero_filled_locked(); | |
4520 res->set_zero_fill_allocated(); | |
4521 } | |
4522 } | |
4523 return res; | |
4524 } | |
4525 | |
4526 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { | |
4527 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4528 return alloc_region_from_unclean_list_locked(zero_filled); | |
4529 } | |
4530 | |
4531 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { | |
4532 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4533 put_region_on_unclean_list_locked(r); | |
4534 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4535 } | |
4536 | |
4537 void G1CollectedHeap::set_unclean_regions_coming(bool b) { | |
4538 MutexLockerEx x(Cleanup_mon); | |
4539 set_unclean_regions_coming_locked(b); | |
4540 } | |
4541 | |
4542 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { | |
4543 assert(Cleanup_mon->owned_by_self(), "Precondition"); | |
4544 _unclean_regions_coming = b; | |
4545 // Wake up mutator threads that might be waiting for completeCleanup to | |
4546 // finish. | |
4547 if (!b) Cleanup_mon->notify_all(); | |
4548 } | |
4549 | |
4550 void G1CollectedHeap::wait_for_cleanup_complete() { | |
4551 MutexLockerEx x(Cleanup_mon); | |
4552 wait_for_cleanup_complete_locked(); | |
4553 } | |
4554 | |
4555 void G1CollectedHeap::wait_for_cleanup_complete_locked() { | |
4556 assert(Cleanup_mon->owned_by_self(), "precondition"); | |
4557 while (_unclean_regions_coming) { | |
4558 Cleanup_mon->wait(); | |
4559 } | |
4560 } | |
4561 | |
4562 void | |
4563 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { | |
4564 assert(ZF_mon->owned_by_self(), "precondition."); | |
4565 _unclean_region_list.insert_before_head(r); | |
4566 } | |
4567 | |
4568 void | |
4569 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { | |
4570 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4571 prepend_region_list_on_unclean_list_locked(list); | |
4572 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. | |
4573 } | |
4574 | |
4575 void | |
4576 G1CollectedHeap:: | |
4577 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { | |
4578 assert(ZF_mon->owned_by_self(), "precondition."); | |
4579 _unclean_region_list.prepend_list(list); | |
4580 } | |
4581 | |
4582 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { | |
4583 assert(ZF_mon->owned_by_self(), "precondition."); | |
4584 HeapRegion* res = _unclean_region_list.pop(); | |
4585 if (res != NULL) { | |
4586 // Inform ZF thread that there's a new unclean head. | |
4587 if (_unclean_region_list.hd() != NULL && should_zf()) | |
4588 ZF_mon->notify_all(); | |
4589 } | |
4590 return res; | |
4591 } | |
4592 | |
4593 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { | |
4594 assert(ZF_mon->owned_by_self(), "precondition."); | |
4595 return _unclean_region_list.hd(); | |
4596 } | |
4597 | |
4598 | |
4599 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { | |
4600 assert(ZF_mon->owned_by_self(), "Precondition"); | |
4601 HeapRegion* r = peek_unclean_region_list_locked(); | |
4602 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { | |
4603 // Result of below must be equal to "r", since we hold the lock. | |
4604 (void)pop_unclean_region_list_locked(); | |
4605 put_free_region_on_list_locked(r); | |
4606 return true; | |
4607 } else { | |
4608 return false; | |
4609 } | |
4610 } | |
4611 | |
4612 bool G1CollectedHeap::move_cleaned_region_to_free_list() { | |
4613 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4614 return move_cleaned_region_to_free_list_locked(); | |
4615 } | |
4616 | |
4617 | |
4618 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { | |
4619 assert(ZF_mon->owned_by_self(), "precondition."); | |
4620 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4621 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, | |
4622 "Regions on free list must be zero filled"); | |
4623 assert(!r->isHumongous(), "Must not be humongous."); | |
4624 assert(r->is_empty(), "Better be empty"); | |
4625 assert(!r->is_on_free_list(), | |
4626 "Better not already be on free list"); | |
4627 assert(!r->is_on_unclean_list(), | |
4628 "Better not already be on unclean list"); | |
4629 r->set_on_free_list(true); | |
4630 r->set_next_on_free_list(_free_region_list); | |
4631 _free_region_list = r; | |
4632 _free_region_list_size++; | |
4633 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4634 } | |
4635 | |
4636 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { | |
4637 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4638 put_free_region_on_list_locked(r); | |
4639 } | |
4640 | |
4641 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { | |
4642 assert(ZF_mon->owned_by_self(), "precondition."); | |
4643 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4644 HeapRegion* res = _free_region_list; | |
4645 if (res != NULL) { | |
4646 _free_region_list = res->next_from_free_list(); | |
4647 _free_region_list_size--; | |
4648 res->set_on_free_list(false); | |
4649 res->set_next_on_free_list(NULL); | |
4650 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4651 } | |
4652 return res; | |
4653 } | |
4654 | |
4655 | |
4656 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { | |
4657 // By self, or on behalf of self. | |
4658 assert(Heap_lock->is_locked(), "Precondition"); | |
4659 HeapRegion* res = NULL; | |
4660 bool first = true; | |
4661 while (res == NULL) { | |
4662 if (zero_filled || !first) { | |
4663 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4664 res = pop_free_region_list_locked(); | |
4665 if (res != NULL) { | |
4666 assert(!res->zero_fill_is_allocated(), | |
4667 "No allocated regions on free list."); | |
4668 res->set_zero_fill_allocated(); | |
4669 } else if (!first) { | |
4670 break; // We tried both, time to return NULL. | |
4671 } | |
4672 } | |
4673 | |
4674 if (res == NULL) { | |
4675 res = alloc_region_from_unclean_list(zero_filled); | |
4676 } | |
4677 assert(res == NULL || | |
4678 !zero_filled || | |
4679 res->zero_fill_is_allocated(), | |
4680 "We must have allocated the region we're returning"); | |
4681 first = false; | |
4682 } | |
4683 return res; | |
4684 } | |
4685 | |
4686 void G1CollectedHeap::remove_allocated_regions_from_lists() { | |
4687 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4688 { | |
4689 HeapRegion* prev = NULL; | |
4690 HeapRegion* cur = _unclean_region_list.hd(); | |
4691 while (cur != NULL) { | |
4692 HeapRegion* next = cur->next_from_unclean_list(); | |
4693 if (cur->zero_fill_is_allocated()) { | |
4694 // Remove from the list. | |
4695 if (prev == NULL) { | |
4696 (void)_unclean_region_list.pop(); | |
4697 } else { | |
4698 _unclean_region_list.delete_after(prev); | |
4699 } | |
4700 cur->set_on_unclean_list(false); | |
4701 cur->set_next_on_unclean_list(NULL); | |
4702 } else { | |
4703 prev = cur; | |
4704 } | |
4705 cur = next; | |
4706 } | |
4707 assert(_unclean_region_list.sz() == unclean_region_list_length(), | |
4708 "Inv"); | |
4709 } | |
4710 | |
4711 { | |
4712 HeapRegion* prev = NULL; | |
4713 HeapRegion* cur = _free_region_list; | |
4714 while (cur != NULL) { | |
4715 HeapRegion* next = cur->next_from_free_list(); | |
4716 if (cur->zero_fill_is_allocated()) { | |
4717 // Remove from the list. | |
4718 if (prev == NULL) { | |
4719 _free_region_list = cur->next_from_free_list(); | |
4720 } else { | |
4721 prev->set_next_on_free_list(cur->next_from_free_list()); | |
4722 } | |
4723 cur->set_on_free_list(false); | |
4724 cur->set_next_on_free_list(NULL); | |
4725 _free_region_list_size--; | |
4726 } else { | |
4727 prev = cur; | |
4728 } | |
4729 cur = next; | |
4730 } | |
4731 assert(_free_region_list_size == free_region_list_length(), "Inv"); | |
4732 } | |
4733 } | |
4734 | |
4735 bool G1CollectedHeap::verify_region_lists() { | |
4736 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4737 return verify_region_lists_locked(); | |
4738 } | |
4739 | |
4740 bool G1CollectedHeap::verify_region_lists_locked() { | |
4741 HeapRegion* unclean = _unclean_region_list.hd(); | |
4742 while (unclean != NULL) { | |
4743 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); | |
4744 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); | |
4745 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, | |
4746 "Everything else is possible."); | |
4747 unclean = unclean->next_from_unclean_list(); | |
4748 } | |
4749 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); | |
4750 | |
4751 HeapRegion* free_r = _free_region_list; | |
4752 while (free_r != NULL) { | |
4753 assert(free_r->is_on_free_list(), "Well, it is!"); | |
4754 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); | |
4755 switch (free_r->zero_fill_state()) { | |
4756 case HeapRegion::NotZeroFilled: | |
4757 case HeapRegion::ZeroFilling: | |
4758 guarantee(false, "Should not be on free list."); | |
4759 break; | |
4760 default: | |
4761 // Everything else is possible. | |
4762 break; | |
4763 } | |
4764 free_r = free_r->next_from_free_list(); | |
4765 } | |
4766 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); | |
4767 // If we didn't do an assertion... | |
4768 return true; | |
4769 } | |
4770 | |
4771 size_t G1CollectedHeap::free_region_list_length() { | |
4772 assert(ZF_mon->owned_by_self(), "precondition."); | |
4773 size_t len = 0; | |
4774 HeapRegion* cur = _free_region_list; | |
4775 while (cur != NULL) { | |
4776 len++; | |
4777 cur = cur->next_from_free_list(); | |
4778 } | |
4779 return len; | |
4780 } | |
4781 | |
4782 size_t G1CollectedHeap::unclean_region_list_length() { | |
4783 assert(ZF_mon->owned_by_self(), "precondition."); | |
4784 return _unclean_region_list.length(); | |
4785 } | |
4786 | |
4787 size_t G1CollectedHeap::n_regions() { | |
4788 return _hrs->length(); | |
4789 } | |
4790 | |
4791 size_t G1CollectedHeap::max_regions() { | |
4792 return | |
4793 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / | |
4794 HeapRegion::GrainBytes; | |
4795 } | |
4796 | |
4797 size_t G1CollectedHeap::free_regions() { | |
4798 /* Possibly-expensive assert. | |
4799 assert(_free_regions == count_free_regions(), | |
4800 "_free_regions is off."); | |
4801 */ | |
4802 return _free_regions; | |
4803 } | |
4804 | |
4805 bool G1CollectedHeap::should_zf() { | |
4806 return _free_region_list_size < (size_t) G1ConcZFMaxRegions; | |
4807 } | |
4808 | |
4809 class RegionCounter: public HeapRegionClosure { | |
4810 size_t _n; | |
4811 public: | |
4812 RegionCounter() : _n(0) {} | |
4813 bool doHeapRegion(HeapRegion* r) { | |
4814 if (r->is_empty() && !r->popular()) { | |
4815 assert(!r->isHumongous(), "H regions should not be empty."); | |
4816 _n++; | |
4817 } | |
4818 return false; | |
4819 } | |
4820 int res() { return (int) _n; } | |
4821 }; | |
4822 | |
4823 size_t G1CollectedHeap::count_free_regions() { | |
4824 RegionCounter rc; | |
4825 heap_region_iterate(&rc); | |
4826 size_t n = rc.res(); | |
4827 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) | |
4828 n--; | |
4829 return n; | |
4830 } | |
4831 | |
4832 size_t G1CollectedHeap::count_free_regions_list() { | |
4833 size_t n = 0; | |
4834 size_t o = 0; | |
4835 ZF_mon->lock_without_safepoint_check(); | |
4836 HeapRegion* cur = _free_region_list; | |
4837 while (cur != NULL) { | |
4838 cur = cur->next_from_free_list(); | |
4839 n++; | |
4840 } | |
4841 size_t m = unclean_region_list_length(); | |
4842 ZF_mon->unlock(); | |
4843 return n + m; | |
4844 } | |
4845 | |
4846 bool G1CollectedHeap::should_set_young_locked() { | |
4847 assert(heap_lock_held_for_gc(), | |
4848 "the heap lock should already be held by or for this thread"); | |
4849 return (g1_policy()->in_young_gc_mode() && | |
4850 g1_policy()->should_add_next_region_to_young_list()); | |
4851 } | |
4852 | |
4853 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { | |
4854 assert(heap_lock_held_for_gc(), | |
4855 "the heap lock should already be held by or for this thread"); | |
4856 _young_list->push_region(hr); | |
4857 g1_policy()->set_region_short_lived(hr); | |
4858 } | |
4859 | |
4860 class NoYoungRegionsClosure: public HeapRegionClosure { | |
4861 private: | |
4862 bool _success; | |
4863 public: | |
4864 NoYoungRegionsClosure() : _success(true) { } | |
4865 bool doHeapRegion(HeapRegion* r) { | |
4866 if (r->is_young()) { | |
4867 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", | |
4868 r->bottom(), r->end()); | |
4869 _success = false; | |
4870 } | |
4871 return false; | |
4872 } | |
4873 bool success() { return _success; } | |
4874 }; | |
4875 | |
4876 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list, | |
4877 bool check_sample) { | |
4878 bool ret = true; | |
4879 | |
4880 ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample); | |
4881 if (!ignore_scan_only_list) { | |
4882 NoYoungRegionsClosure closure; | |
4883 heap_region_iterate(&closure); | |
4884 ret = ret && closure.success(); | |
4885 } | |
4886 | |
4887 return ret; | |
4888 } | |
4889 | |
4890 void G1CollectedHeap::empty_young_list() { | |
4891 assert(heap_lock_held_for_gc(), | |
4892 "the heap lock should already be held by or for this thread"); | |
4893 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); | |
4894 | |
4895 _young_list->empty_list(); | |
4896 } | |
4897 | |
4898 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { | |
4899 bool no_allocs = true; | |
4900 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { | |
4901 HeapRegion* r = _gc_alloc_regions[ap]; | |
4902 no_allocs = r == NULL || r->saved_mark_at_top(); | |
4903 } | |
4904 return no_allocs; | |
4905 } | |
4906 | |
4907 void G1CollectedHeap::all_alloc_regions_note_end_of_copying() { | |
4908 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { | |
4909 HeapRegion* r = _gc_alloc_regions[ap]; | |
4910 if (r != NULL) { | |
4911 // Check for aliases. | |
4912 bool has_processed_alias = false; | |
4913 for (int i = 0; i < ap; ++i) { | |
4914 if (_gc_alloc_regions[i] == r) { | |
4915 has_processed_alias = true; | |
4916 break; | |
4917 } | |
4918 } | |
4919 if (!has_processed_alias) { | |
4920 r->note_end_of_copying(); | |
4921 g1_policy()->record_after_bytes(r->used()); | |
4922 } | |
4923 } | |
4924 } | |
4925 } | |
4926 | |
4927 | |
4928 // Done at the start of full GC. | |
4929 void G1CollectedHeap::tear_down_region_lists() { | |
4930 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4931 while (pop_unclean_region_list_locked() != NULL) ; | |
4932 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, | |
4933 "Postconditions of loop.") | |
4934 while (pop_free_region_list_locked() != NULL) ; | |
4935 assert(_free_region_list == NULL, "Postcondition of loop."); | |
4936 if (_free_region_list_size != 0) { | |
4937 gclog_or_tty->print_cr("Size is %d.", _free_region_list_size); | |
4938 print(); | |
4939 } | |
4940 assert(_free_region_list_size == 0, "Postconditions of loop."); | |
4941 } | |
4942 | |
4943 | |
4944 class RegionResetter: public HeapRegionClosure { | |
4945 G1CollectedHeap* _g1; | |
4946 int _n; | |
4947 public: | |
4948 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
4949 bool doHeapRegion(HeapRegion* r) { | |
4950 if (r->continuesHumongous()) return false; | |
4951 if (r->top() > r->bottom()) { | |
4952 if (r->top() < r->end()) { | |
4953 Copy::fill_to_words(r->top(), | |
4954 pointer_delta(r->end(), r->top())); | |
4955 } | |
4956 r->set_zero_fill_allocated(); | |
4957 } else { | |
4958 assert(r->is_empty(), "tautology"); | |
4959 if (r->popular()) { | |
4960 if (r->zero_fill_state() != HeapRegion::Allocated) { | |
4961 r->ensure_zero_filled_locked(); | |
4962 r->set_zero_fill_allocated(); | |
4963 } | |
4964 } else { | |
4965 _n++; | |
4966 switch (r->zero_fill_state()) { | |
4967 case HeapRegion::NotZeroFilled: | |
4968 case HeapRegion::ZeroFilling: | |
4969 _g1->put_region_on_unclean_list_locked(r); | |
4970 break; | |
4971 case HeapRegion::Allocated: | |
4972 r->set_zero_fill_complete(); | |
4973 // no break; go on to put on free list. | |
4974 case HeapRegion::ZeroFilled: | |
4975 _g1->put_free_region_on_list_locked(r); | |
4976 break; | |
4977 } | |
4978 } | |
4979 } | |
4980 return false; | |
4981 } | |
4982 | |
4983 int getFreeRegionCount() {return _n;} | |
4984 }; | |
4985 | |
4986 // Done at the end of full GC. | |
4987 void G1CollectedHeap::rebuild_region_lists() { | |
4988 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
4989 // This needs to go at the end of the full GC. | |
4990 RegionResetter rs; | |
4991 heap_region_iterate(&rs); | |
4992 _free_regions = rs.getFreeRegionCount(); | |
4993 // Tell the ZF thread it may have work to do. | |
4994 if (should_zf()) ZF_mon->notify_all(); | |
4995 } | |
4996 | |
4997 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { | |
4998 G1CollectedHeap* _g1; | |
4999 int _n; | |
5000 public: | |
5001 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} | |
5002 bool doHeapRegion(HeapRegion* r) { | |
5003 if (r->continuesHumongous()) return false; | |
5004 if (r->top() > r->bottom()) { | |
5005 // There are assertions in "set_zero_fill_needed()" below that | |
5006 // require top() == bottom(), so this is technically illegal. | |
5007 // We'll skirt the law here, by making that true temporarily. | |
5008 DEBUG_ONLY(HeapWord* save_top = r->top(); | |
5009 r->set_top(r->bottom())); | |
5010 r->set_zero_fill_needed(); | |
5011 DEBUG_ONLY(r->set_top(save_top)); | |
5012 } | |
5013 return false; | |
5014 } | |
5015 }; | |
5016 | |
5017 // Done at the start of full GC. | |
5018 void G1CollectedHeap::set_used_regions_to_need_zero_fill() { | |
5019 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
5020 // This needs to go at the end of the full GC. | |
5021 UsedRegionsNeedZeroFillSetter rs; | |
5022 heap_region_iterate(&rs); | |
5023 } | |
5024 | |
5025 class CountObjClosure: public ObjectClosure { | |
5026 size_t _n; | |
5027 public: | |
5028 CountObjClosure() : _n(0) {} | |
5029 void do_object(oop obj) { _n++; } | |
5030 size_t n() { return _n; } | |
5031 }; | |
5032 | |
5033 size_t G1CollectedHeap::pop_object_used_objs() { | |
5034 size_t sum_objs = 0; | |
5035 for (int i = 0; i < G1NumPopularRegions; i++) { | |
5036 CountObjClosure cl; | |
5037 _hrs->at(i)->object_iterate(&cl); | |
5038 sum_objs += cl.n(); | |
5039 } | |
5040 return sum_objs; | |
5041 } | |
5042 | |
5043 size_t G1CollectedHeap::pop_object_used_bytes() { | |
5044 size_t sum_bytes = 0; | |
5045 for (int i = 0; i < G1NumPopularRegions; i++) { | |
5046 sum_bytes += _hrs->at(i)->used(); | |
5047 } | |
5048 return sum_bytes; | |
5049 } | |
5050 | |
5051 | |
5052 static int nq = 0; | |
5053 | |
5054 HeapWord* G1CollectedHeap::allocate_popular_object(size_t word_size) { | |
5055 while (_cur_pop_hr_index < G1NumPopularRegions) { | |
5056 HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index); | |
5057 HeapWord* res = cur_pop_region->allocate(word_size); | |
5058 if (res != NULL) { | |
5059 // We account for popular objs directly in the used summary: | |
5060 _summary_bytes_used += (word_size * HeapWordSize); | |
5061 return res; | |
5062 } | |
5063 // Otherwise, try the next region (first making sure that we remember | |
5064 // the last "top" value as the "next_top_at_mark_start", so that | |
5065 // objects made popular during markings aren't automatically considered | |
5066 // live). | |
5067 cur_pop_region->note_end_of_copying(); | |
5068 // Otherwise, try the next region. | |
5069 _cur_pop_hr_index++; | |
5070 } | |
5071 // XXX: For now !!! | |
5072 vm_exit_out_of_memory(word_size, | |
5073 "Not enough pop obj space (To Be Fixed)"); | |
5074 return NULL; | |
5075 } | |
5076 | |
5077 class HeapRegionList: public CHeapObj { | |
5078 public: | |
5079 HeapRegion* hr; | |
5080 HeapRegionList* next; | |
5081 }; | |
5082 | |
5083 void G1CollectedHeap::schedule_popular_region_evac(HeapRegion* r) { | |
5084 // This might happen during parallel GC, so protect by this lock. | |
5085 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
5086 // We don't schedule regions whose evacuations are already pending, or | |
5087 // are already being evacuated. | |
5088 if (!r->popular_pending() && !r->in_collection_set()) { | |
5089 r->set_popular_pending(true); | |
5090 if (G1TracePopularity) { | |
5091 gclog_or_tty->print_cr("Scheduling region "PTR_FORMAT" " | |
5092 "["PTR_FORMAT", "PTR_FORMAT") for pop-object evacuation.", | |
5093 r, r->bottom(), r->end()); | |
5094 } | |
5095 HeapRegionList* hrl = new HeapRegionList; | |
5096 hrl->hr = r; | |
5097 hrl->next = _popular_regions_to_be_evacuated; | |
5098 _popular_regions_to_be_evacuated = hrl; | |
5099 } | |
5100 } | |
5101 | |
5102 HeapRegion* G1CollectedHeap::popular_region_to_evac() { | |
5103 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
5104 HeapRegion* res = NULL; | |
5105 while (_popular_regions_to_be_evacuated != NULL && res == NULL) { | |
5106 HeapRegionList* hrl = _popular_regions_to_be_evacuated; | |
5107 _popular_regions_to_be_evacuated = hrl->next; | |
5108 res = hrl->hr; | |
5109 // The G1RSPopLimit may have increased, so recheck here... | |
5110 if (res->rem_set()->occupied() < (size_t) G1RSPopLimit) { | |
5111 // Hah: don't need to schedule. | |
5112 if (G1TracePopularity) { | |
5113 gclog_or_tty->print_cr("Unscheduling region "PTR_FORMAT" " | |
5114 "["PTR_FORMAT", "PTR_FORMAT") " | |
5115 "for pop-object evacuation (size %d < limit %d)", | |
5116 res, res->bottom(), res->end(), | |
5117 res->rem_set()->occupied(), G1RSPopLimit); | |
5118 } | |
5119 res->set_popular_pending(false); | |
5120 res = NULL; | |
5121 } | |
5122 // We do not reset res->popular() here; if we did so, it would allow | |
5123 // the region to be "rescheduled" for popularity evacuation. Instead, | |
5124 // this is done in the collection pause, with the world stopped. | |
5125 // So the invariant is that the regions in the list have the popularity | |
5126 // boolean set, but having the boolean set does not imply membership | |
5127 // on the list (though there can at most one such pop-pending region | |
5128 // not on the list at any time). | |
5129 delete hrl; | |
5130 } | |
5131 return res; | |
5132 } | |
5133 | |
5134 void G1CollectedHeap::evac_popular_region(HeapRegion* hr) { | |
5135 while (true) { | |
5136 // Don't want to do a GC pause while cleanup is being completed! | |
5137 wait_for_cleanup_complete(); | |
5138 | |
5139 // Read the GC count while holding the Heap_lock | |
5140 int gc_count_before = SharedHeap::heap()->total_collections(); | |
5141 g1_policy()->record_stop_world_start(); | |
5142 | |
5143 { | |
5144 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
5145 VM_G1PopRegionCollectionPause op(gc_count_before, hr); | |
5146 VMThread::execute(&op); | |
5147 | |
5148 // If the prolog succeeded, we didn't do a GC for this. | |
5149 if (op.prologue_succeeded()) break; | |
5150 } | |
5151 // Otherwise we didn't. We should recheck the size, though, since | |
5152 // the limit may have increased... | |
5153 if (hr->rem_set()->occupied() < (size_t) G1RSPopLimit) { | |
5154 hr->set_popular_pending(false); | |
5155 break; | |
5156 } | |
5157 } | |
5158 } | |
5159 | |
5160 void G1CollectedHeap::atomic_inc_obj_rc(oop obj) { | |
5161 Atomic::inc(obj_rc_addr(obj)); | |
5162 } | |
5163 | |
5164 class CountRCClosure: public OopsInHeapRegionClosure { | |
5165 G1CollectedHeap* _g1h; | |
5166 bool _parallel; | |
5167 public: | |
5168 CountRCClosure(G1CollectedHeap* g1h) : | |
5169 _g1h(g1h), _parallel(ParallelGCThreads > 0) | |
5170 {} | |
5171 void do_oop(narrowOop* p) { | |
5172 guarantee(false, "NYI"); | |
5173 } | |
5174 void do_oop(oop* p) { | |
5175 oop obj = *p; | |
5176 assert(obj != NULL, "Precondition."); | |
5177 if (_parallel) { | |
5178 // We go sticky at the limit to avoid excess contention. | |
5179 // If we want to track the actual RC's further, we'll need to keep a | |
5180 // per-thread hash table or something for the popular objects. | |
5181 if (_g1h->obj_rc(obj) < G1ObjPopLimit) { | |
5182 _g1h->atomic_inc_obj_rc(obj); | |
5183 } | |
5184 } else { | |
5185 _g1h->inc_obj_rc(obj); | |
5186 } | |
5187 } | |
5188 }; | |
5189 | |
5190 class EvacPopObjClosure: public ObjectClosure { | |
5191 G1CollectedHeap* _g1h; | |
5192 size_t _pop_objs; | |
5193 size_t _max_rc; | |
5194 public: | |
5195 EvacPopObjClosure(G1CollectedHeap* g1h) : | |
5196 _g1h(g1h), _pop_objs(0), _max_rc(0) {} | |
5197 | |
5198 void do_object(oop obj) { | |
5199 size_t rc = _g1h->obj_rc(obj); | |
5200 _max_rc = MAX2(rc, _max_rc); | |
5201 if (rc >= (size_t) G1ObjPopLimit) { | |
5202 _g1h->_pop_obj_rc_at_copy.add((double)rc); | |
5203 size_t word_sz = obj->size(); | |
5204 HeapWord* new_pop_loc = _g1h->allocate_popular_object(word_sz); | |
5205 oop new_pop_obj = (oop)new_pop_loc; | |
5206 Copy::aligned_disjoint_words((HeapWord*)obj, new_pop_loc, word_sz); | |
5207 obj->forward_to(new_pop_obj); | |
5208 G1ScanAndBalanceClosure scan_and_balance(_g1h); | |
5209 new_pop_obj->oop_iterate_backwards(&scan_and_balance); | |
5210 // preserve "next" mark bit if marking is in progress. | |
5211 if (_g1h->mark_in_progress() && !_g1h->is_obj_ill(obj)) { | |
5212 _g1h->concurrent_mark()->markAndGrayObjectIfNecessary(new_pop_obj); | |
5213 } | |
5214 | |
5215 if (G1TracePopularity) { | |
5216 gclog_or_tty->print_cr("Found obj " PTR_FORMAT " of word size " SIZE_FORMAT | |
5217 " pop (%d), move to " PTR_FORMAT, | |
5218 (void*) obj, word_sz, | |
5219 _g1h->obj_rc(obj), (void*) new_pop_obj); | |
5220 } | |
5221 _pop_objs++; | |
5222 } | |
5223 } | |
5224 size_t pop_objs() { return _pop_objs; } | |
5225 size_t max_rc() { return _max_rc; } | |
5226 }; | |
5227 | |
5228 class G1ParCountRCTask : public AbstractGangTask { | |
5229 G1CollectedHeap* _g1h; | |
5230 BitMap _bm; | |
5231 | |
5232 size_t getNCards() { | |
5233 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) | |
5234 / G1BlockOffsetSharedArray::N_bytes; | |
5235 } | |
5236 CountRCClosure _count_rc_closure; | |
5237 public: | |
5238 G1ParCountRCTask(G1CollectedHeap* g1h) : | |
5239 AbstractGangTask("G1 Par RC Count task"), | |
5240 _g1h(g1h), _bm(getNCards()), _count_rc_closure(g1h) | |
5241 {} | |
5242 | |
5243 void work(int i) { | |
5244 ResourceMark rm; | |
5245 HandleMark hm; | |
5246 _g1h->g1_rem_set()->oops_into_collection_set_do(&_count_rc_closure, i); | |
5247 } | |
5248 }; | |
5249 | |
5250 void G1CollectedHeap::popularity_pause_preamble(HeapRegion* popular_region) { | |
5251 // We're evacuating a single region (for popularity). | |
5252 if (G1TracePopularity) { | |
5253 gclog_or_tty->print_cr("Doing pop region pause for ["PTR_FORMAT", "PTR_FORMAT")", | |
5254 popular_region->bottom(), popular_region->end()); | |
5255 } | |
5256 g1_policy()->set_single_region_collection_set(popular_region); | |
5257 size_t max_rc; | |
5258 if (!compute_reference_counts_and_evac_popular(popular_region, | |
5259 &max_rc)) { | |
5260 // We didn't evacuate any popular objects. | |
5261 // We increase the RS popularity limit, to prevent this from | |
5262 // happening in the future. | |
5263 if (G1RSPopLimit < (1 << 30)) { | |
5264 G1RSPopLimit *= 2; | |
5265 } | |
5266 // For now, interesting enough for a message: | |
5267 #if 1 | |
5268 gclog_or_tty->print_cr("In pop region pause for ["PTR_FORMAT", "PTR_FORMAT"), " | |
5269 "failed to find a pop object (max = %d).", | |
5270 popular_region->bottom(), popular_region->end(), | |
5271 max_rc); | |
5272 gclog_or_tty->print_cr("Increased G1RSPopLimit to %d.", G1RSPopLimit); | |
5273 #endif // 0 | |
5274 // Also, we reset the collection set to NULL, to make the rest of | |
5275 // the collection do nothing. | |
5276 assert(popular_region->next_in_collection_set() == NULL, | |
5277 "should be single-region."); | |
5278 popular_region->set_in_collection_set(false); | |
5279 popular_region->set_popular_pending(false); | |
5280 g1_policy()->clear_collection_set(); | |
5281 } | |
5282 } | |
5283 | |
5284 bool G1CollectedHeap:: | |
5285 compute_reference_counts_and_evac_popular(HeapRegion* popular_region, | |
5286 size_t* max_rc) { | |
5287 HeapWord* rc_region_bot; | |
5288 HeapWord* rc_region_end; | |
5289 | |
5290 // Set up the reference count region. | |
5291 HeapRegion* rc_region = newAllocRegion(HeapRegion::GrainWords); | |
5292 if (rc_region != NULL) { | |
5293 rc_region_bot = rc_region->bottom(); | |
5294 rc_region_end = rc_region->end(); | |
5295 } else { | |
5296 rc_region_bot = NEW_C_HEAP_ARRAY(HeapWord, HeapRegion::GrainWords); | |
5297 if (rc_region_bot == NULL) { | |
5298 vm_exit_out_of_memory(HeapRegion::GrainWords, | |
5299 "No space for RC region."); | |
5300 } | |
5301 rc_region_end = rc_region_bot + HeapRegion::GrainWords; | |
5302 } | |
5303 | |
5304 if (G1TracePopularity) | |
5305 gclog_or_tty->print_cr("RC region is ["PTR_FORMAT", "PTR_FORMAT")", | |
5306 rc_region_bot, rc_region_end); | |
5307 if (rc_region_bot > popular_region->bottom()) { | |
5308 _rc_region_above = true; | |
5309 _rc_region_diff = | |
5310 pointer_delta(rc_region_bot, popular_region->bottom(), 1); | |
5311 } else { | |
5312 assert(rc_region_bot < popular_region->bottom(), "Can't be equal."); | |
5313 _rc_region_above = false; | |
5314 _rc_region_diff = | |
5315 pointer_delta(popular_region->bottom(), rc_region_bot, 1); | |
5316 } | |
5317 g1_policy()->record_pop_compute_rc_start(); | |
5318 // Count external references. | |
5319 g1_rem_set()->prepare_for_oops_into_collection_set_do(); | |
5320 if (ParallelGCThreads > 0) { | |
5321 | |
5322 set_par_threads(workers()->total_workers()); | |
5323 G1ParCountRCTask par_count_rc_task(this); | |
5324 workers()->run_task(&par_count_rc_task); | |
5325 set_par_threads(0); | |
5326 | |
5327 } else { | |
5328 CountRCClosure count_rc_closure(this); | |
5329 g1_rem_set()->oops_into_collection_set_do(&count_rc_closure, 0); | |
5330 } | |
5331 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); | |
5332 g1_policy()->record_pop_compute_rc_end(); | |
5333 | |
5334 // Now evacuate popular objects. | |
5335 g1_policy()->record_pop_evac_start(); | |
5336 EvacPopObjClosure evac_pop_obj_cl(this); | |
5337 popular_region->object_iterate(&evac_pop_obj_cl); | |
5338 *max_rc = evac_pop_obj_cl.max_rc(); | |
5339 | |
5340 // Make sure the last "top" value of the current popular region is copied | |
5341 // as the "next_top_at_mark_start", so that objects made popular during | |
5342 // markings aren't automatically considered live. | |
5343 HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index); | |
5344 cur_pop_region->note_end_of_copying(); | |
5345 | |
5346 if (rc_region != NULL) { | |
5347 free_region(rc_region); | |
5348 } else { | |
5349 FREE_C_HEAP_ARRAY(HeapWord, rc_region_bot); | |
5350 } | |
5351 g1_policy()->record_pop_evac_end(); | |
5352 | |
5353 return evac_pop_obj_cl.pop_objs() > 0; | |
5354 } | |
5355 | |
5356 class CountPopObjInfoClosure: public HeapRegionClosure { | |
5357 size_t _objs; | |
5358 size_t _bytes; | |
5359 | |
5360 class CountObjClosure: public ObjectClosure { | |
5361 int _n; | |
5362 public: | |
5363 CountObjClosure() : _n(0) {} | |
5364 void do_object(oop obj) { _n++; } | |
5365 size_t n() { return _n; } | |
5366 }; | |
5367 | |
5368 public: | |
5369 CountPopObjInfoClosure() : _objs(0), _bytes(0) {} | |
5370 bool doHeapRegion(HeapRegion* r) { | |
5371 _bytes += r->used(); | |
5372 CountObjClosure blk; | |
5373 r->object_iterate(&blk); | |
5374 _objs += blk.n(); | |
5375 return false; | |
5376 } | |
5377 size_t objs() { return _objs; } | |
5378 size_t bytes() { return _bytes; } | |
5379 }; | |
5380 | |
5381 | |
5382 void G1CollectedHeap::print_popularity_summary_info() const { | |
5383 CountPopObjInfoClosure blk; | |
5384 for (int i = 0; i <= _cur_pop_hr_index; i++) { | |
5385 blk.doHeapRegion(_hrs->at(i)); | |
5386 } | |
5387 gclog_or_tty->print_cr("\nPopular objects: %d objs, %d bytes.", | |
5388 blk.objs(), blk.bytes()); | |
5389 gclog_or_tty->print_cr(" RC at copy = [avg = %5.2f, max = %5.2f, sd = %5.2f].", | |
5390 _pop_obj_rc_at_copy.avg(), | |
5391 _pop_obj_rc_at_copy.maximum(), | |
5392 _pop_obj_rc_at_copy.sd()); | |
5393 } | |
5394 | |
5395 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { | |
5396 _refine_cte_cl->set_concurrent(concurrent); | |
5397 } | |
5398 | |
5399 #ifndef PRODUCT | |
5400 | |
5401 class PrintHeapRegionClosure: public HeapRegionClosure { | |
5402 public: | |
5403 bool doHeapRegion(HeapRegion *r) { | |
5404 gclog_or_tty->print("Region: "PTR_FORMAT":", r); | |
5405 if (r != NULL) { | |
5406 if (r->is_on_free_list()) | |
5407 gclog_or_tty->print("Free "); | |
5408 if (r->is_young()) | |
5409 gclog_or_tty->print("Young "); | |
5410 if (r->isHumongous()) | |
5411 gclog_or_tty->print("Is Humongous "); | |
5412 r->print(); | |
5413 } | |
5414 return false; | |
5415 } | |
5416 }; | |
5417 | |
5418 class SortHeapRegionClosure : public HeapRegionClosure { | |
5419 size_t young_regions,free_regions, unclean_regions; | |
5420 size_t hum_regions, count; | |
5421 size_t unaccounted, cur_unclean, cur_alloc; | |
5422 size_t total_free; | |
5423 HeapRegion* cur; | |
5424 public: | |
5425 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), | |
5426 free_regions(0), unclean_regions(0), | |
5427 hum_regions(0), | |
5428 count(0), unaccounted(0), | |
5429 cur_alloc(0), total_free(0) | |
5430 {} | |
5431 bool doHeapRegion(HeapRegion *r) { | |
5432 count++; | |
5433 if (r->is_on_free_list()) free_regions++; | |
5434 else if (r->is_on_unclean_list()) unclean_regions++; | |
5435 else if (r->isHumongous()) hum_regions++; | |
5436 else if (r->is_young()) young_regions++; | |
5437 else if (r == cur) cur_alloc++; | |
5438 else unaccounted++; | |
5439 return false; | |
5440 } | |
5441 void print() { | |
5442 total_free = free_regions + unclean_regions; | |
5443 gclog_or_tty->print("%d regions\n", count); | |
5444 gclog_or_tty->print("%d free: free_list = %d unclean = %d\n", | |
5445 total_free, free_regions, unclean_regions); | |
5446 gclog_or_tty->print("%d humongous %d young\n", | |
5447 hum_regions, young_regions); | |
5448 gclog_or_tty->print("%d cur_alloc\n", cur_alloc); | |
5449 gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted); | |
5450 } | |
5451 }; | |
5452 | |
5453 void G1CollectedHeap::print_region_counts() { | |
5454 SortHeapRegionClosure sc(_cur_alloc_region); | |
5455 PrintHeapRegionClosure cl; | |
5456 heap_region_iterate(&cl); | |
5457 heap_region_iterate(&sc); | |
5458 sc.print(); | |
5459 print_region_accounting_info(); | |
5460 }; | |
5461 | |
5462 bool G1CollectedHeap::regions_accounted_for() { | |
5463 // TODO: regions accounting for young/survivor/tenured | |
5464 return true; | |
5465 } | |
5466 | |
5467 bool G1CollectedHeap::print_region_accounting_info() { | |
5468 gclog_or_tty->print_cr("P regions: %d.", G1NumPopularRegions); | |
5469 gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).", | |
5470 free_regions(), | |
5471 count_free_regions(), count_free_regions_list(), | |
5472 _free_region_list_size, _unclean_region_list.sz()); | |
5473 gclog_or_tty->print_cr("cur_alloc: %d.", | |
5474 (_cur_alloc_region == NULL ? 0 : 1)); | |
5475 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); | |
5476 | |
5477 // TODO: check regions accounting for young/survivor/tenured | |
5478 return true; | |
5479 } | |
5480 | |
5481 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { | |
5482 HeapRegion* hr = heap_region_containing(p); | |
5483 if (hr == NULL) { | |
5484 return is_in_permanent(p); | |
5485 } else { | |
5486 return hr->is_in(p); | |
5487 } | |
5488 } | |
5489 #endif // PRODUCT | |
5490 | |
5491 void G1CollectedHeap::g1_unimplemented() { | |
5492 // Unimplemented(); | |
5493 } | |
5494 | |
5495 | |
5496 // Local Variables: *** | |
5497 // c-indentation-style: gnu *** | |
5498 // End: *** |