/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_g1CollectedHeap.cpp.incl"

// turn it on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during
// / after the collection --- this is useful for debugging
#define SCAN_ONLY_VERBOSE 0

// CURRENT STATUS
// This file is under construction.  Search for "FIXME".

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock.  This happens in
// mem_allocate_work, which all such allocation functions call.
// (Note that this does not apply to TLAB allocation, which is not part
// of this interface: it is done by clients of this interface.)

// Local to this file.

// Finds the first HeapRegion.
// No longer used, but might be handy someday.

class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};

class RefineCardTableEntryClosure: public CardTableEntryClosure {
  SuspendibleThreadSet* _sts;
  G1RemSet* _g1rs;
  ConcurrentG1Refine* _cg1r;
  bool _concurrent;
public:
  RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
                              G1RemSet* g1rs,
                              ConcurrentG1Refine* cg1r) :
    _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    _g1rs->concurrentRefineOneCard(card_ptr, worker_i);
    if (_concurrent && _sts->should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
  void set_concurrent(bool b) { _concurrent = b; }
};
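
// Note: G1CollectedHeap::initialize() (later in this file) creates a single
// RefineCardTableEntryClosure and installs it as the closure of the JavaThread
// dirty card queue set (_refine_cte_cl); check_ct_logs_at_safepoint() below
// temporarily swaps in other closures and then restores this one.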

class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
  int _histo[256];
public:
  ClearLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
    for (int i = 0; i < 256; i++) _histo[i] = 0;
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      unsigned char* ujb = (unsigned char*)card_ptr;
      int ind = (int)(*ujb);
      _histo[ind]++;
      *card_ptr = -1;
    }
    return true;
  }
  int calls() { return _calls; }
  void print_histo() {
    gclog_or_tty->print_cr("Card table value histogram:");
    for (int i = 0; i < 256; i++) {
      if (_histo[i] != 0) {
        gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
      }
    }
  }
};

class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
public:
  RedirtyLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      *card_ptr = 0;
    }
    return true;
  }
  int calls() { return _calls; }
};
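
// The two closures above are used by check_ct_logs_at_safepoint() below:
// ClearLoggedCardTableEntryClosure clears every logged card (recording a
// histogram of the card values it saw), and RedirtyLoggedCardTableEntryClosure
// re-dirties the same cards so that the logs and the card table stay in sync.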

YoungList::YoungList(G1CollectedHeap* g1h)
  : _g1h(g1h), _head(NULL),
    _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
    _length(0), _scan_only_length(0),
    _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivors_tail(NULL), _survivor_length(0)
{
  guarantee( check_list_empty(false), "just making sure..." );
}

void YoungList::push_region(HeapRegion *hr) {
  assert(!hr->is_young(), "should not already be young");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_head);
  _head = hr;

  hr->set_young();
  double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
  ++_length;
}

void YoungList::add_survivor_region(HeapRegion* hr) {
  assert(!hr->is_survivor(), "should not already be a survivor");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_survivor_head);
  if (_survivor_head == NULL) {
    _survivors_tail = hr;
  }
  _survivor_head = hr;

  hr->set_survivor();
  ++_survivor_length;
}

HeapRegion* YoungList::pop_region() {
  while (_head != NULL) {
    assert( length() > 0, "list should not be empty" );
    HeapRegion* ret = _head;
    _head = ret->get_next_young_region();
    ret->set_next_young_region(NULL);
    --_length;
    assert(ret->is_young(), "region should be very young");

    // Replace 'Survivor' region type with 'Young'. So the region will
    // be treated as a young region and will not be 'confused' with
    // newly created survivor regions.
    if (ret->is_survivor()) {
      ret->set_young();
    }

    if (!ret->is_scan_only()) {
      return ret;
    }

    // scan-only, we'll add it to the scan-only list
    if (_scan_only_tail == NULL) {
      guarantee( _scan_only_head == NULL, "invariant" );

      _scan_only_head = ret;
      _curr_scan_only = ret;
    } else {
      guarantee( _scan_only_head != NULL, "invariant" );
      _scan_only_tail->set_next_young_region(ret);
    }
    guarantee( ret->get_next_young_region() == NULL, "invariant" );
    _scan_only_tail = ret;

    // no need to be tagged as scan-only any more
    ret->set_young();

    ++_scan_only_length;
  }
  assert( length() == 0, "list should be empty" );
  return NULL;
}
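
// pop_region() therefore returns the next region on the young list that is
// not scan-only; any scan-only regions it encounters on the way are moved to
// the scan-only list (retagged as plain young) rather than returned, and a
// NULL result means the young list has been exhausted.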

void YoungList::empty_list(HeapRegion* list) {
  while (list != NULL) {
    HeapRegion* next = list->get_next_young_region();
    list->set_next_young_region(NULL);
    list->uninstall_surv_rate_group();
    list->set_not_young();
    list = next;
  }
}

void YoungList::empty_list() {
  assert(check_list_well_formed(), "young list should be well formed");

  empty_list(_head);
  _head = NULL;
  _length = 0;

  empty_list(_scan_only_head);
  _scan_only_head = NULL;
  _scan_only_tail = NULL;
  _scan_only_length = 0;
  _curr_scan_only = NULL;

  empty_list(_survivor_head);
  _survivor_head = NULL;
  _survivors_tail = NULL;
  _survivor_length = 0;

  _last_sampled_rs_lengths = 0;

  assert(check_list_empty(false), "just making sure...");
}

bool YoungList::check_list_well_formed() {
  bool ret = true;

  size_t length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
    if (!curr->is_young() || curr->is_scan_only()) {
      gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (%d, %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_scan_only());
      ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  ret = ret && (length == _length);

  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
    gclog_or_tty->print_cr("### list has %d entries, _length is %d",
                           length, _length);
  }

  bool scan_only_ret = true;
  length = 0;
  curr = _scan_only_head;
  last = NULL;
  while (curr != NULL) {
    if (!curr->is_young() || curr->is_scan_only()) {
      gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (%d, %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_scan_only());
      scan_only_ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  scan_only_ret = scan_only_ret && (length == _scan_only_length);

  if ( (last != _scan_only_tail) ||
       (_scan_only_head == NULL && _scan_only_tail != NULL) ||
       (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
    gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
    scan_only_ret = false;
  }

  if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
    gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
    scan_only_ret = false;
  }

  if (!scan_only_ret) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
    gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d",
                           length, _scan_only_length);
  }

  return ret && scan_only_ret;
}

bool YoungList::check_list_empty(bool ignore_scan_only_list,
                                 bool check_sample) {
  bool ret = true;

  if (_length != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
                           _length);
    ret = false;
  }
  if (check_sample && _last_sampled_rs_lengths != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
    ret = false;
  }
  if (_head != NULL) {
    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
    ret = false;
  }
  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
  }

  if (ignore_scan_only_list)
    return ret;

  bool scan_only_ret = true;
  if (_scan_only_length != 0) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
                           _scan_only_length);
    scan_only_ret = false;
  }
  if (_scan_only_head != NULL) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
    scan_only_ret = false;
  }
  if (_scan_only_tail != NULL) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
    scan_only_ret = false;
  }
  if (!scan_only_ret) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
  }

  return ret && scan_only_ret;
}

void
YoungList::rs_length_sampling_init() {
  _sampled_rs_lengths = 0;
  _curr = _head;
}

bool
YoungList::rs_length_sampling_more() {
  return _curr != NULL;
}

void
YoungList::rs_length_sampling_next() {
  assert( _curr != NULL, "invariant" );
  _sampled_rs_lengths += _curr->rem_set()->occupied();
  _curr = _curr->get_next_young_region();
  if (_curr == NULL) {
    _last_sampled_rs_lengths = _sampled_rs_lengths;
    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
  }
}
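
// The three methods above are meant to be used together; an illustrative
// sampling loop (not copied from a real caller) would look like:
//
//   young_list->rs_length_sampling_init();
//   while (young_list->rs_length_sampling_more()) {
//     young_list->rs_length_sampling_next();
//   }
//
// after which _last_sampled_rs_lengths holds the sum of rem_set()->occupied()
// over the young regions that were on the list.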

void
YoungList::reset_auxilary_lists() {
  // We could have just "moved" the scan-only list to the young list.
  // However, the scan-only list is ordered according to the region
  // age in descending order, so, by moving one entry at a time, we
  // ensure that it is recreated in ascending order.

  guarantee( is_empty(), "young list should be empty" );
  assert(check_list_well_formed(), "young list should be well formed");

  // Add survivor regions to SurvRateGroup.
  _g1h->g1_policy()->note_start_adding_survivor_regions();
  for (HeapRegion* curr = _survivor_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    _g1h->g1_policy()->set_region_survivors(curr);
  }
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  if (_survivor_head != NULL) {
    _head   = _survivor_head;
    _length = _survivor_length + _scan_only_length;
    _survivors_tail->set_next_young_region(_scan_only_head);
  } else {
    _head   = _scan_only_head;
    _length = _scan_only_length;
  }

  for (HeapRegion* curr = _scan_only_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    curr->recalculate_age_in_surv_rate_group();
  }
  _scan_only_head   = NULL;
  _scan_only_tail   = NULL;
  _scan_only_length = 0;
  _curr_scan_only   = NULL;

  _survivor_head    = NULL;
  _survivors_tail   = NULL;
  _survivor_length  = 0;
  _g1h->g1_policy()->finished_recalculating_age_indexes();

  assert(check_list_well_formed(), "young list should be well formed");
}
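
// In other words, after reset_auxilary_lists() the young list starts with the
// survivor regions (if any) followed by the former scan-only regions, its
// length is the sum of the two auxiliary lengths, and both auxiliary lists are
// left empty with the survivor regions registered in the SurvRateGroup.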

void YoungList::print() {
  HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head};
  const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"};

  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
    HeapRegion *curr = lists[list];
    if (curr == NULL)
      gclog_or_tty->print_cr("  empty");
    while (curr != NULL) {
      gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                             "age: %4d, y: %d, s-o: %d, surv: %d",
                             curr->bottom(), curr->end(),
                             curr->top(),
                             curr->prev_top_at_mark_start(),
                             curr->next_top_at_mark_start(),
                             curr->top_at_conc_mark_count(),
                             curr->age_in_surv_rate_group_cond(),
                             curr->is_young(),
                             curr->is_scan_only(),
                             curr->is_survivor());
      curr = curr->get_next_young_region();
    }
  }

  gclog_or_tty->print_cr("");
}
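
// Key for the per-region line printed by YoungList::print() above:
// t = top(), P = prev_top_at_mark_start(), N = next_top_at_mark_start(),
// C = top_at_conc_mark_count(); y, s-o and surv are the is_young(),
// is_scan_only() and is_survivor() flags.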

void G1CollectedHeap::stop_conc_gc_threads() {
  _cg1r->cg1rThread()->stop();
  _czft->stop();
  _cmThread->stop();
}

void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there are no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}

// Private class members.

G1CollectedHeap* G1CollectedHeap::_g1h;

// Private methods.

// Finds a HeapRegion that can be used to allocate a block of the given size.

HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
                                                 bool do_expand,
                                                 bool zero_filled) {
  ConcurrentZFThread::note_region_alloc();
  HeapRegion* res = alloc_free_region_from_lists(zero_filled);
  if (res == NULL && do_expand) {
    expand(word_size * HeapWordSize);
    res = alloc_free_region_from_lists(zero_filled);
    assert(res == NULL ||
           (!res->isHumongous() &&
            (!zero_filled ||
             res->zero_fill_state() == HeapRegion::Allocated)),
           "Alloc Regions must be zero filled (and non-H)");
  }
  if (res != NULL && res->is_empty()) _free_regions--;
  assert(res == NULL ||
         (!res->isHumongous() &&
          (!zero_filled ||
           res->zero_fill_state() == HeapRegion::Allocated)),
         "Non-young alloc Regions must be zero filled (and non-H)");

  if (G1TraceRegions) {
    if (res != NULL) {
      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                             "top "PTR_FORMAT,
                             res->hrs_index(), res->bottom(), res->end(), res->top());
    }
  }

  return res;
}

HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
                                                         size_t word_size,
                                                         bool zero_filled) {
  HeapRegion* alloc_region = NULL;
  if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
    alloc_region = newAllocRegion_work(word_size, true, zero_filled);
    if (purpose == GCAllocForSurvived && alloc_region != NULL) {
      _young_list->add_survivor_region(alloc_region);
    }
    ++_gc_alloc_region_counts[purpose];
  } else {
    g1_policy()->note_alloc_region_limit_reached(purpose);
  }
  return alloc_region;
}

// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
  assert(regions_accounted_for(), "Region leakage!");

  // We can't allocate H regions while cleanupComplete is running, since
  // some of the regions we find to be empty might not yet be added to the
  // unclean list.  (If we're already at a safepoint, this call is
  // unnecessary, not to mention wrong.)
  if (!SafepointSynchronize::is_at_safepoint())
    wait_for_cleanup_complete();

  size_t num_regions =
    round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;

  // Special case if < one region???

  // Remember the ft size.
  size_t x_size = expansion_regions();

  HeapWord* res = NULL;
  bool eliminated_allocated_from_lists = false;

  // Can the allocation potentially fit in the free regions?
  if (free_regions() >= num_regions) {
    res = _hrs->obj_allocate(word_size);
  }
  if (res == NULL) {
    // Try expansion.
    size_t fs = _hrs->free_suffix();
    if (fs + x_size >= num_regions) {
      expand((num_regions - fs) * HeapRegion::GrainBytes);
      res = _hrs->obj_allocate(word_size);
      assert(res != NULL, "This should have worked.");
    } else {
      // Expansion won't help.  Are there enough free regions if we get rid
      // of reservations?
      size_t avail = free_regions();
      if (avail >= num_regions) {
        res = _hrs->obj_allocate(word_size);
        if (res != NULL) {
          remove_allocated_regions_from_lists();
          eliminated_allocated_from_lists = true;
        }
      }
    }
  }
  if (res != NULL) {
    // Increment by the number of regions allocated.
    // FIXME: Assumes regions all of size GrainBytes.
#ifndef PRODUCT
    mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
                                           HeapRegion::GrainWords));
#endif
    if (!eliminated_allocated_from_lists)
      remove_allocated_regions_from_lists();
    _summary_bytes_used += word_size * HeapWordSize;
    _free_regions -= num_regions;
    _num_humongous_regions += (int) num_regions;
  }
  assert(regions_accounted_for(), "Region Leakage");
  return res;
}

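// Locking contract for attempt_allocation_slow() (asserted below): the caller
// is either at a safepoint or holds the Heap_lock.  When not at a safepoint,
// the Heap_lock is released exactly when a non-NULL result is returned, and is
// still held by the caller when NULL is returned.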
HeapWord*
G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                         bool permit_collection_pause) {
  HeapWord* res = NULL;
  HeapRegion* allocated_young_region = NULL;

  assert( SafepointSynchronize::is_at_safepoint() ||
          Heap_lock->owned_by_self(), "pre condition of the call" );

  if (isHumongous(word_size)) {
    // Allocation of a humongous object can, in a sense, complete a
    // partial region, if the previous alloc was also humongous, and
    // caused the test below to succeed.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    res = humongousObjAllocate(word_size);
    assert(_cur_alloc_region == NULL
           || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

  } else {
    // If we do a collection pause, this will be reset to a non-NULL
    // value.  If we don't, nulling here ensures that we allocate a new
    // region below.
    if (_cur_alloc_region != NULL) {
      // We're finished with the _cur_alloc_region.
      _summary_bytes_used += _cur_alloc_region->used();
      _cur_alloc_region = NULL;
    }
    assert(_cur_alloc_region == NULL, "Invariant.");
    // Completion of a heap region is perhaps a good point at which to do
    // a collection pause.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    // Make sure we have an allocation region available.
    if (_cur_alloc_region == NULL) {
      if (!SafepointSynchronize::is_at_safepoint())
        wait_for_cleanup_complete();
      bool next_is_young = should_set_young_locked();
      // If the next region is not young, make sure it's zero-filled.
      _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
      if (_cur_alloc_region != NULL) {
        _summary_bytes_used -= _cur_alloc_region->used();
        if (next_is_young) {
          set_region_short_lived_locked(_cur_alloc_region);
          allocated_young_region = _cur_alloc_region;
        }
      }
    }
    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

    // Now retry the allocation.
    if (_cur_alloc_region != NULL) {
      res = _cur_alloc_region->allocate(word_size);
    }
  }

  // NOTE: fails frequently in PRT
  assert(regions_accounted_for(), "Region leakage!");

  if (res != NULL) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      assert( permit_collection_pause, "invariant" );
      assert( Heap_lock->owned_by_self(), "invariant" );
      Heap_lock->unlock();
    }

    if (allocated_young_region != NULL) {
      HeapRegion* hr = allocated_young_region;
      HeapWord* bottom = hr->bottom();
      HeapWord* end = hr->end();
      MemRegion mr(bottom, end);
      ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
    }
  }

  assert( SafepointSynchronize::is_at_safepoint() ||
          (res == NULL && Heap_lock->owned_by_self()) ||
          (res != NULL && !Heap_lock->owned_by_self()),
          "post condition of the call" );

  return res;
}

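// General allocation entry point.  Each iteration of the loop below tries the
// fast path under the Heap_lock via attempt_allocation(); if that fails, it
// records the collection count, drops the lock, and hands a
// VM_G1CollectForAllocation operation to the VM thread.  It returns as soon as
// either path produces a result (possibly NULL after a completed GC), warning
// periodically if it keeps retrying.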
HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool   is_noref,
                              bool   is_tlab,
                              bool*  gc_overhead_limit_was_exceeded) {
  debug_only(check_for_valid_allocation_state());
  assert(no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    int gc_count_before;
    {
      Heap_lock->lock();
      result = attempt_allocation(word_size);
      if (result != NULL) {
        // attempt_allocation should have unlocked the heap lock
        assert(is_in(result), "result not in heap");
        return result;
      }
      // Read the gc count while the heap lock is held.
      gc_count_before = SharedHeap::heap()->total_collections();
      Heap_lock->unlock();
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(word_size,
                                 gc_count_before);

    // ...and get the VM thread to execute it.
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      assert(result == NULL || is_in(result), "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::mem_allocate_work retries %d times",
              try_count);
    }
  }
}

void G1CollectedHeap::abandon_cur_alloc_region() {
  if (_cur_alloc_region != NULL) {
    // We're finished with the _cur_alloc_region.
    if (_cur_alloc_region->is_empty()) {
      _free_regions++;
      free_region(_cur_alloc_region);
    } else {
      _summary_bytes_used += _cur_alloc_region->used();
    }
    _cur_alloc_region = NULL;
  }
}

class PostMCRemSetClearClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    r->reset_gc_time_stamp();
    if (r->continuesHumongous())
      return false;
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs != NULL) hrrs->clear();
    // You might think here that we could clear just the cards
    // corresponding to the used region.  But no: if we leave a dirty card
    // in a region we might allocate into, then it would prevent that card
    // from being enqueued, and cause it to be missed.
    // Re: the performance cost: we shouldn't be doing full GC anyway!
    _mr_bs->clear(MemRegion(r->bottom(), r->end()));
    return false;
  }
};

class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->used_region().word_size() != 0) {
      _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
    }
    return false;
  }
};

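// Stop-the-world full collection.  "full" marks an explicit (System.gc())
// request, as the DisableExplicitGC check and the pause label below show;
// "word_size" is the allocation that prompted the collection and is passed on
// to resize_if_necessary_after_full_collection() when sizing the heap
// afterwards.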
void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
                                    size_t word_size) {
  ResourceMark rm;

  if (full && DisableExplicitGC) {
    gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");

  if (GC_locker::is_active()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  {
    IsGCActiveMark x;

    // Timing
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);

    double start = os::elapsedTime();
    GCOverheadReporter::recordSTWStart(start);
    g1_policy()->record_full_collection_start();

    gc_prologue(true);
    increment_total_collections();

    size_t g1h_prev_used = used();
    assert(used() == recalculate_used(), "Should be equal");

    if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      prepare_for_verify();
      gclog_or_tty->print(" VerifyBeforeGC:");
      Universe::verify(true);
    }
    assert(regions_accounted_for(), "Region leakage!");

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    // We want to discover references, but not process them yet.
    // This mode is disabled in
    // instanceRefKlass::process_discovered_references if the
    // generation does some collection work, or
    // instanceRefKlass::enqueue_discovered_references if the
    // generation returns without doing any work.
    ref_processor()->disable_discovery();
    ref_processor()->abandon_partial_discovery();
    ref_processor()->verify_no_references_recorded();

    // Abandon current iterations of concurrent marking and concurrent
    // refinement, if any are in progress.
    concurrent_mark()->abort();

    // Make sure we'll choose a new allocation region afterwards.
    abandon_cur_alloc_region();
    assert(_cur_alloc_region == NULL, "Invariant.");
    g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
    tear_down_region_lists();
    set_used_regions_to_need_zero_fill();
    if (g1_policy()->in_young_gc_mode()) {
      empty_young_list();
      g1_policy()->set_full_young_gcs(true);
    }

    // Temporarily make reference _discovery_ single threaded (non-MT).
    ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);

    // Temporarily make refs discovery atomic
    ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);

    // Temporarily clear _is_alive_non_header
    ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);

    ref_processor()->enable_discovery();

    // Do collection work
    {
      HandleMark hm;  // Discard invalid handles created during gc
      G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
    }
    // Because freeing humongous regions may have added some unclean
    // regions, it is necessary to tear down again before rebuilding.
    tear_down_region_lists();
    rebuild_region_lists();

    _summary_bytes_used = recalculate_used();

    ref_processor()->enqueue_discovered_references();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      gclog_or_tty->print(" VerifyAfterGC:");
      Universe::verify(false);
    }
    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

    reset_gc_time_stamp();
    // Since everything potentially moved, we will clear all remembered
    // sets, and clear all cards.  Later we will also dirty the cards in
    // the used portion of the heap after the resizing (which could be a
    // shrinking.)  We will also reset the GC time stamps of the regions.
    PostMCRemSetClearClosure rs_clear(mr_bs());
    heap_region_iterate(&rs_clear);

    // Resize the heap if necessary.
    resize_if_necessary_after_full_collection(full ? 0 : word_size);

    // Since everything potentially moved, we will clear all remembered
    // sets, but also dirty all cards corresponding to used regions.
    PostMCRemSetInvalidateClosure rs_invalidate(mr_bs());
    heap_region_iterate(&rs_invalidate);
    if (_cg1r->use_cache()) {
      _cg1r->clear_and_record_card_counts();
      _cg1r->clear_hot_cache();
    }

    if (PrintGC) {
      print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
    }

    if (true) { // FIXME
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
    }

    double end = os::elapsedTime();
    GCOverheadReporter::recordSTWEnd(end);
    g1_policy()->record_full_collection_end();

    gc_epilogue(true);

    // Abandon concurrent refinement.  This must happen last: in the
    // dirty-card logging system, some cards may be dirtied by weak-ref
    // processing, and may be enqueued.  But the whole card table is
    // dirtied, so this should abandon those logs, and set "do_traversal"
    // to true.
    concurrent_g1_refine()->set_pya_restart();

    assert(regions_accounted_for(), "Region leakage!");
  }

  if (g1_policy()->in_young_gc_mode()) {
    _young_list->reset_sampled_info();
    assert( check_young_list_empty(false, false),
            "young list should be empty at this point");
  }
}

void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_collection(true, clear_all_soft_refs, 0);
}

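// For illustration only (the flag values here are made up): with
// MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70, the code below computes
// maximum_used_percentage = 0.60 and minimum_used_percentage = 0.30, so a heap
// with 600M used after the collection gets minimum_desired_capacity =
// 600M / 0.60 = 1000M and maximum_desired_capacity = 600M / 0.30 = 2000M, and
// the committed size is then expanded or shrunk into that range.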
// This code is mostly copied from TenuredGeneration.
void
G1CollectedHeap::
resize_if_necessary_after_full_collection(size_t word_size) {
  assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");

  // Include the current allocation, if any, and bytes that will be
  // pre-allocated to support collections, as "used".
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();
  const size_t free_after_gc = capacity_after_gc - used_after_gc;

  // We don't have floating point command-line arguments
  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
  const double minimum_used_percentage = 1.0 - maximum_free_percentage;

  size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage);
  size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage);

  // Don't shrink less than the initial size.
  minimum_desired_capacity =
    MAX2(minimum_desired_capacity,
         collector_policy()->initial_heap_byte_size());
  maximum_desired_capacity =
    MAX2(maximum_desired_capacity,
         collector_policy()->initial_heap_byte_size());

  // We are failing here because minimum_desired_capacity is
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
  assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const double free_percentage = ((double)free_after_gc) / capacity();
    gclog_or_tty->print_cr("Computing new size after full GC ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f",
                           minimum_free_percentage);
    gclog_or_tty->print_cr("  "
                           "  maximum_free_percentage: %6.2f",
                           maximum_free_percentage);
    gclog_or_tty->print_cr("  "
                           "  capacity: %6.1fK"
                           "  minimum_desired_capacity: %6.1fK"
                           "  maximum_desired_capacity: %6.1fK",
                           capacity() / (double) K,
                           minimum_desired_capacity / (double) K,
                           maximum_desired_capacity / (double) K);
    gclog_or_tty->print_cr("  "
                           "  free_after_gc   : %6.1fK"
                           "  used_after_gc   : %6.1fK",
                           free_after_gc / (double) K,
                           used_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
                           "  free_percentage: %6.2f",
                           free_percentage);
  }
  if (capacity() < minimum_desired_capacity) {
    // Don't expand unless it's significant
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    expand(expand_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
                             "  minimum_desired_capacity: %6.1fK"
                             "  expand_bytes: %6.1fK",
                             minimum_desired_capacity / (double) K,
                             expand_bytes / (double) K);
    }

    // No expansion, now see if we want to shrink
  } else if (capacity() > maximum_desired_capacity) {
    // Capacity too large, compute shrinking size
    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
    shrink(shrink_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  shrinking:"
                             "  initSize: %.1fK"
                             "  maximum_desired_capacity: %.1fK",
                             collector_policy()->initial_heap_byte_size() / (double) K,
                             maximum_desired_capacity / (double) K);
      gclog_or_tty->print_cr("  "
                             "  shrink_bytes: %.1fK",
                             shrink_bytes / (double) K);
    }
  }
}

HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  HeapWord* result = NULL;

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses.  Therefore, at least for now, we'll favor
  // expansion over collection.  (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)

  result = expand_and_allocate(word_size);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // OK, I guess we have to try collection.

  do_collection(false, false, word_size);

  result = attempt_allocation(word_size, /*permit_collection_pause*/false);

  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // Try collecting soft references.
  do_collection(false, true, word_size);
  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size".  If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".

HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  size_t expand_bytes = word_size * HeapWordSize;
  if (expand_bytes < MinHeapDeltaBytes) {
    expand_bytes = MinHeapDeltaBytes;
  }
  expand(expand_bytes);
  assert(regions_accounted_for(), "Region leakage!");
  HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
  return result;
}

size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
  size_t pre_used = 0;
  size_t cleared_h_regions = 0;
  size_t freed_regions = 0;
  UncleanRegionList local_list;
  free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
                                    freed_regions, &local_list);

  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
                          &local_list);
  return pre_used;
}

void
G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
                                                   size_t& pre_used,
                                                   size_t& cleared_h,
                                                   size_t& freed_regions,
                                                   UncleanRegionList* list,
                                                   bool par) {
  assert(!hr->continuesHumongous(), "should have filtered these out");
  size_t res = 0;
  if (!hr->popular() && hr->used() > 0 && hr->garbage_bytes() == hr->used()) {
    if (!hr->is_young()) {
      if (G1PolicyVerbose > 0)
        gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
                               " during cleanup", hr, hr->used());
      free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
    }
  }
}

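// Note: expand() commits storage one region (HeapRegion::GrainBytes) at a time
// after rounding the request up to page and region granularity, and it
// creates, registers and zero-fill-classifies a new HeapRegion for each chunk
// it manages to commit.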
// FIXME: both this and shrink could probably be more efficient by
// doing one "VirtualSpace::expand_by" call rather than several.
void G1CollectedHeap::expand(size_t expand_bytes) {
  size_t old_mem_size = _g1_storage.committed_size();
  // We expand by a minimum of 1K.
  expand_bytes = MAX2(expand_bytes, (size_t)K);
  size_t aligned_expand_bytes =
    ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                       HeapRegion::GrainBytes);
  expand_bytes = aligned_expand_bytes;
  while (expand_bytes > 0) {
    HeapWord* base = (HeapWord*)_g1_storage.high();
    // Commit more storage.
    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
    if (!successful) {
      expand_bytes = 0;
    } else {
      expand_bytes -= HeapRegion::GrainBytes;
      // Expand the committed region.
      HeapWord* high = (HeapWord*) _g1_storage.high();
      _g1_committed.set_end(high);
      // Create a new HeapRegion.
      MemRegion mr(base, high);
      bool is_zeroed = !_g1_max_committed.contains(base);
      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);

      // Now update max_committed if necessary.
      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));

      // Add it to the HeapRegionSeq.
      _hrs->insert(hr);
      // Set the zero-fill state, according to whether it's already
      // zeroed.
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        if (is_zeroed) {
          hr->set_zero_fill_complete();
          put_free_region_on_list_locked(hr);
        } else {
          hr->set_zero_fill_needed();
          put_region_on_unclean_list_locked(hr);
        }
      }
      _free_regions++;
      // And we used up an expansion region to create it.
      _expansion_regions--;
      // Tell the cardtable about it.
      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
      // And the offset table as well.
      _bot_shared->resize(_g1_committed.word_size());
    }
  }
  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_expand_bytes/K,
                           new_mem_size/K);
  }
}

void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
{
  size_t old_mem_size = _g1_storage.committed_size();
  size_t aligned_shrink_bytes =
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                         HeapRegion::GrainBytes);
  size_t num_regions_deleted = 0;
  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);

  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  if (mr.byte_size() > 0)
    _g1_storage.shrink_by(mr.byte_size());
  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");

  _g1_committed.set_end(mr.start());
  _free_regions -= num_regions_deleted;
  _expansion_regions += num_regions_deleted;

  // Tell the cardtable about it.
  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);

  // And the offset table as well.
  _bot_shared->resize(_g1_committed.word_size());

  HeapRegionRemSet::shrink_heap(n_regions());

  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_shrink_bytes/K,
                           new_mem_size/K);
  }
}

void G1CollectedHeap::shrink(size_t shrink_bytes) {
  release_gc_alloc_regions();
  tear_down_region_lists();  // We will rebuild them in a moment.
  shrink_helper(shrink_bytes);
  rebuild_region_lists();
}

// Public methods.

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  SharedHeap(policy_),
  _g1_policy(policy_),
  _ref_processor(NULL),
  _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  _bot_shared(NULL),
  _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
  _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  _evac_failure_scan_stack(NULL),
  _mark_in_progress(false),
  _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
  _cur_alloc_region(NULL),
  _refine_cte_cl(NULL),
  _free_region_list(NULL), _free_region_list_size(0),
  _free_regions(0),
  _popular_object_boundary(NULL),
  _cur_pop_hr_index(0),
  _popular_regions_to_be_evacuated(NULL),
  _pop_obj_rc_at_copy(),
  _full_collection(false),
  _unclean_region_list(),
  _unclean_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
  _surviving_young_words(NULL)
{
  _g1h = this; // To catch bugs.
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  int n_queues = MAX2((int)ParallelGCThreads, 1);
  _task_queues = new RefToScanQueueSet(n_queues);

  int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  HeapRegionRemSetIterator** iter_arr =
    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  for (int i = 0; i < n_queues; i++) {
    iter_arr[i] = new HeapRegionRemSetIterator();
  }
  _rem_set_iterator = iter_arr;

  for (int i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
    _task_queues->register_queue(i, q);
  }

  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    _gc_alloc_regions[ap] = NULL;
    _gc_alloc_region_counts[ap] = 0;
  }
  guarantee(_task_queues != NULL, "task_queues allocation failure.");
}

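// Heap set-up.  Note the ordering below: the ConcurrentG1Refine object is
// created before the heap storage is reserved and carved into the G1 part and
// the perm gen; the ConcurrentMark structures are created only once
// max_regions() is known; and the popular regions are allocated before the
// rest of the initial heap is committed via expand().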
jint G1CollectedHeap::initialize() {
  os::enable_vtime();

  // Necessary to satisfy locking discipline assertions.

  MutexLocker x(Heap_lock);

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  // Ensure that the sizes are properly aligned.
  Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");

  // We allocate this in any case, but it does no work if the command line
  // param is off.
  _cg1r = new ConcurrentG1Refine();

  // Reserve the maximum.
  PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  // Includes the perm-gen.
  ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
                        HeapRegion::GrainBytes,
                        false /*ism*/);

  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (I've actually seen this
  // happen in asserts: DLD.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));

  _expansion_regions = max_byte_size/HeapRegion::GrainBytes;

  _num_humongous_regions = 0;

  // Create the gen rem set (and barrier set) for the entire reserved region.
  _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  set_barrier_set(rem_set()->bs());
  if (barrier_set()->is_a(BarrierSet::ModRef)) {
    _mr_bs = (ModRefBarrierSet*)_barrier_set;
  } else {
    vm_exit_during_initialization("G1 requires a mod ref bs.");
    return JNI_ENOMEM;
  }

  // Also create a G1 rem set.
  if (G1UseHRIntoRS) {
    if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
      _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
    } else {
      vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
      return JNI_ENOMEM;
    }
  } else {
    _g1_rem_set = new StupidG1RemSet(this);
  }

  // Carve out the G1 part of the heap.

  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
  _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
                           g1_rs.size()/HeapWordSize);
  ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);

  _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());

  _g1_storage.initialize(g1_rs, 0);
  _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  _g1_max_committed = _g1_committed;
  _hrs = new HeapRegionSeq();
  guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
  guarantee(_cur_alloc_region == NULL, "from constructor");

  _bot_shared = new G1BlockOffsetSharedArray(_reserved,
                                             heap_word_size(init_byte_size));

  _g1h = this;

  // Create the ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)
  _cm = new ConcurrentMark(heap_rs, (int) max_regions());
  _cmThread = _cm->cmThread();

  // ...and the concurrent zero-fill thread, if necessary.
  if (G1ConcZeroFill) {
    _czft = new ConcurrentZFThread();
  }

  // Allocate the popular regions; take them off free lists.
  size_t pop_byte_size = G1NumPopularRegions * HeapRegion::GrainBytes;
  expand(pop_byte_size);
  _popular_object_boundary =
    _g1_reserved.start() + (G1NumPopularRegions * HeapRegion::GrainWords);
  for (int i = 0; i < G1NumPopularRegions; i++) {
    HeapRegion* hr = newAllocRegion(HeapRegion::GrainWords);
    //    assert(hr != NULL && hr->bottom() < _popular_object_boundary,
    //           "Should be enough, and all should be below boundary.");
    hr->set_popular(true);
  }
  assert(_cur_pop_hr_index == 0, "Start allocating at the first region.");

  // Initialize the from_card cache structure of HeapRegionRemSet.
  HeapRegionRemSet::init_heap(max_regions());

  // Now expand into the rest of the initial heap size.
  expand(init_byte_size - pop_byte_size);

  // Perform any initialization actions delegated to the policy.
  g1_policy()->init();

  g1_policy()->note_start_of_mark_thread();

  _refine_cte_cl =
    new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
                                    g1_rem_set(),
                                    concurrent_g1_refine());
  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);

  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               0,
                                               Shared_SATB_Q_lock);
  if (G1RSBarrierUseQueue) {
    JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                  DirtyCardQ_FL_lock,
                                                  G1DirtyCardQueueMax,
                                                  Shared_DirtyCardQ_lock);
  }
  // In case we're keeping closure specialization stats, initialize those
  // counts and that mechanism.
  SpecializationStats::clear();

  _gc_alloc_region_list = NULL;

  // Do later initialization work for concurrent refinement.
  _cg1r->init();

  const char* group_names[] = { "CR", "ZF", "CM", "CL" };
  GCOverheadReporter::initGCOverheadReporter(4, group_names);

  return JNI_OK;
}

void G1CollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  MemRegion mr = reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
                                         mr,    // span
                                         false, // Reference discovery is not atomic
                                                // (though it shouldn't matter here.)
                                         true,  // mt_discovery
                                         NULL,  // is alive closure: need to fill this in for efficiency
                                         ParallelGCThreads,
                                         ParallelRefProcEnabled,
                                         true); // Setting next fields of discovered
                                                // lists requires a barrier.
}

size_t G1CollectedHeap::capacity() const {
  return _g1_committed.byte_size();
}

void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
                                                 int worker_i) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;
  while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->record_update_rs_processed_buffers(worker_i,
                                                  (double) n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  // Finish up the queue...
  if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i,
                                                            g1_rem_set());
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}

// Computes the sum of the storage used by the various regions.

size_t G1CollectedHeap::used() const {
  assert(Heap_lock->owner() != NULL,
         "Should be owned on this thread's behalf.");
  size_t result = _summary_bytes_used;
  if (_cur_alloc_region != NULL)
    result += _cur_alloc_region->used();
  return result;
}
|
|
1517
|
|
1518 class SumUsedClosure: public HeapRegionClosure {
|
|
1519 size_t _used;
|
|
1520 public:
|
|
1521 SumUsedClosure() : _used(0) {}
|
|
1522 bool doHeapRegion(HeapRegion* r) {
|
|
1523 if (!r->continuesHumongous()) {
|
|
1524 _used += r->used();
|
|
1525 }
|
|
1526 return false;
|
|
1527 }
|
|
1528 size_t result() { return _used; }
|
|
1529 };
|
|
1530
|
|
1531 size_t G1CollectedHeap::recalculate_used() const {
|
|
1532 SumUsedClosure blk;
|
|
1533 _hrs->iterate(&blk);
|
|
1534 return blk.result();
|
|
1535 }
|
|
1536
|
|
1537 #ifndef PRODUCT
|
|
1538 class SumUsedRegionsClosure: public HeapRegionClosure {
|
|
1539 size_t _num;
|
|
1540 public:
|
|
1541 // _num is set to 1 to account for the popular region
|
|
1542 SumUsedRegionsClosure() : _num(G1NumPopularRegions) {}
|
|
1543 bool doHeapRegion(HeapRegion* r) {
|
|
1544 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
|
|
1545 _num += 1;
|
|
1546 }
|
|
1547 return false;
|
|
1548 }
|
|
1549 size_t result() { return _num; }
|
|
1550 };
|
|
1551
|
|
1552 size_t G1CollectedHeap::recalculate_used_regions() const {
|
|
1553 SumUsedRegionsClosure blk;
|
|
1554 _hrs->iterate(&blk);
|
|
1555 return blk.result();
|
|
1556 }
|
|
1557 #endif // PRODUCT
|
|
1558
|
|
1559 size_t G1CollectedHeap::unsafe_max_alloc() {
|
|
1560 if (_free_regions > 0) return HeapRegion::GrainBytes;
|
|
1561 // otherwise, is there space in the current allocation region?
|
|
1562
|
|
1563 // We need to store the current allocation region in a local variable
|
|
1564 // here. The problem is that this method doesn't take any locks and
|
|
1565 // there may be other threads which overwrite the current allocation
|
|
1566 // region field. attempt_allocation(), for example, sets it to NULL
|
|
1567 // and this can happen *after* the NULL check here but before the call
|
|
1568 // to free(), resulting in a SIGSEGV. Note that this doesn't appear
|
|
1569 // to be a problem in the optimized build, since the two loads of the
|
|
1570 // current allocation region field are optimized away.
|
|
1571 HeapRegion* car = _cur_alloc_region;
|
|
1572
|
|
1573 // FIXME: should iterate over all regions?
|
|
1574 if (car == NULL) {
|
|
1575 return 0;
|
|
1576 }
|
|
1577 return car->free();
|
|
1578 }
|
|
1579
|
|
1580 void G1CollectedHeap::collect(GCCause::Cause cause) {
|
|
1581 // The caller doesn't have the Heap_lock
|
|
1582 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
|
|
1583 MutexLocker ml(Heap_lock);
|
|
1584 collect_locked(cause);
|
|
1585 }
|
|
1586
|
|
1587 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
|
|
1588 assert(Thread::current()->is_VM_thread(), "Precondition#1");
|
|
1589 assert(Heap_lock->is_locked(), "Precondition#2");
|
|
1590 GCCauseSetter gcs(this, cause);
|
|
1591 switch (cause) {
|
|
1592 case GCCause::_heap_inspection:
|
|
1593 case GCCause::_heap_dump: {
|
|
1594 HandleMark hm;
|
|
1595 do_full_collection(false); // don't clear all soft refs
|
|
1596 break;
|
|
1597 }
|
|
1598 default: // XXX FIX ME
|
|
1599 ShouldNotReachHere(); // Unexpected use of this function
|
|
1600 }
|
|
1601 }
|
|
1602
|
|
1603
|
|
1604 void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
|
|
1605 // Don't want to do a GC until cleanup is completed.
|
|
1606 wait_for_cleanup_complete();
|
|
1607
|
|
1608 // Read the GC count while holding the Heap_lock
|
|
1609 int gc_count_before = SharedHeap::heap()->total_collections();
|
|
1610 {
|
|
1611 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
|
|
1612 VM_G1CollectFull op(gc_count_before, cause);
|
|
1613 VMThread::execute(&op);
|
|
1614 }
|
|
1615 }
|
|
1616
|
|
1617 bool G1CollectedHeap::is_in(const void* p) const {
|
|
1618 if (_g1_committed.contains(p)) {
|
|
1619 HeapRegion* hr = _hrs->addr_to_region(p);
|
|
1620 return hr->is_in(p);
|
|
1621 } else {
|
|
1622 return _perm_gen->as_gen()->is_in(p);
|
|
1623 }
|
|
1624 }
|
|
1625
|
|
1626 // Iteration functions.
|
|
1627
|
|
1628 // Iterates an OopClosure over all ref-containing fields of objects
|
|
1629 // within a HeapRegion.
|
|
1630
|
|
1631 class IterateOopClosureRegionClosure: public HeapRegionClosure {
|
|
1632 MemRegion _mr;
|
|
1633 OopClosure* _cl;
|
|
1634 public:
|
|
1635 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
|
|
1636 : _mr(mr), _cl(cl) {}
|
|
1637 bool doHeapRegion(HeapRegion* r) {
|
|
1638 if (! r->continuesHumongous()) {
|
|
1639 r->oop_iterate(_cl);
|
|
1640 }
|
|
1641 return false;
|
|
1642 }
|
|
1643 };
|
|
1644
|
|
1645 void G1CollectedHeap::oop_iterate(OopClosure* cl) {
|
|
1646 IterateOopClosureRegionClosure blk(_g1_committed, cl);
|
|
1647 _hrs->iterate(&blk);
|
|
1648 }
|
|
1649
|
|
1650 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
|
|
1651 IterateOopClosureRegionClosure blk(mr, cl);
|
|
1652 _hrs->iterate(&blk);
|
|
1653 }
|
|
1654
|
|
1655 // Iterates an ObjectClosure over all objects within a HeapRegion.
|
|
1656
|
|
1657 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
|
|
1658 ObjectClosure* _cl;
|
|
1659 public:
|
|
1660 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
|
|
1661 bool doHeapRegion(HeapRegion* r) {
|
|
1662 if (! r->continuesHumongous()) {
|
|
1663 r->object_iterate(_cl);
|
|
1664 }
|
|
1665 return false;
|
|
1666 }
|
|
1667 };
|
|
1668
|
|
1669 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
|
|
1670 IterateObjectClosureRegionClosure blk(cl);
|
|
1671 _hrs->iterate(&blk);
|
|
1672 }
|
|
1673
|
|
1674 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
|
|
1675 // FIXME: is this right?
|
|
1676 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
|
|
1677 }
|
|
1678
|
|
1679 // Calls a SpaceClosure on a HeapRegion.
|
|
1680
|
|
1681 class SpaceClosureRegionClosure: public HeapRegionClosure {
|
|
1682 SpaceClosure* _cl;
|
|
1683 public:
|
|
1684 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
|
|
1685 bool doHeapRegion(HeapRegion* r) {
|
|
1686 _cl->do_space(r);
|
|
1687 return false;
|
|
1688 }
|
|
1689 };
|
|
1690
|
|
1691 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
|
|
1692 SpaceClosureRegionClosure blk(cl);
|
|
1693 _hrs->iterate(&blk);
|
|
1694 }
|
|
1695
|
|
1696 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
|
|
1697 _hrs->iterate(cl);
|
|
1698 }
|
|
1699
|
|
1700 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
|
|
1701 HeapRegionClosure* cl) {
|
|
1702 _hrs->iterate_from(r, cl);
|
|
1703 }
|
|
1704
|
|
1705 void
|
|
1706 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
|
|
1707 _hrs->iterate_from(idx, cl);
|
|
1708 }
|
|
1709
|
|
1710 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
|
|
1711
|
|
1712 const int OverpartitionFactor = 4;
|
|
1713 void
|
|
1714 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
|
|
1715 int worker,
|
|
1716 jint claim_value) {
|
|
1717 // We break up the heap regions into blocks of size ParallelGCThreads (to
|
|
1718 // decrease iteration costs).
|
|
1719 const size_t nregions = n_regions();
|
|
1720 const size_t n_thrds = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
|
|
1721 const size_t partitions = n_thrds * OverpartitionFactor;
|
|
1722 const size_t BlkSize = MAX2(nregions/partitions, (size_t)1);
|
|
1723 const size_t n_blocks = (nregions + BlkSize - 1)/BlkSize;
|
|
1724 assert(ParallelGCThreads > 0 || worker == 0, "Precondition");
|
|
1725 const int init_idx = (int) (n_blocks/n_thrds * worker);
|
|
1726 for (size_t blk = 0; blk < n_blocks; blk++) {
|
|
1727 size_t idx = init_idx + blk;
|
|
1728 if (idx >= n_blocks) idx = idx - n_blocks;
|
|
1729 size_t reg_idx = idx * BlkSize;
|
|
1730 assert(reg_idx < nregions, "Because we rounded blk up.");
|
|
1731 HeapRegion* r = region_at(reg_idx);
|
|
1732 if (r->claimHeapRegion(claim_value)) {
|
|
1733 for (size_t j = 0; j < BlkSize; j++) {
|
|
1734 size_t reg_idx2 = reg_idx + j;
|
|
1735 if (reg_idx2 == nregions) break;
|
|
1736 HeapRegion* r2 = region_at(reg_idx2);
|
|
1737 if (j > 0) r2->set_claim_value(claim_value);
|
|
1738 bool res = cl->doHeapRegion(r2);
|
|
1739 guarantee(!res, "Should not abort.");
|
|
1740 }
|
|
1741 }
|
|
1742 }
|
|
1743 }
|
|
1744
|
|
1745 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
|
|
1746 HeapRegion* r = g1_policy()->collection_set();
|
|
1747 while (r != NULL) {
|
|
1748 HeapRegion* next = r->next_in_collection_set();
|
|
1749 if (cl->doHeapRegion(r)) {
|
|
1750 cl->incomplete();
|
|
1751 return;
|
|
1752 }
|
|
1753 r = next;
|
|
1754 }
|
|
1755 }
|
|
1756
|
|
1757 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
|
|
1758 HeapRegionClosure *cl) {
|
|
1759 assert(r->in_collection_set(),
|
|
1760 "Start region must be a member of the collection set.");
|
|
1761 HeapRegion* cur = r;
|
|
1762 while (cur != NULL) {
|
|
1763 HeapRegion* next = cur->next_in_collection_set();
|
|
1764 if (cl->doHeapRegion(cur) && false) {
|
|
1765 cl->incomplete();
|
|
1766 return;
|
|
1767 }
|
|
1768 cur = next;
|
|
1769 }
|
|
1770 cur = g1_policy()->collection_set();
|
|
1771 while (cur != r) {
|
|
1772 HeapRegion* next = cur->next_in_collection_set();
|
|
1773 if (cl->doHeapRegion(cur) && false) {
|
|
1774 cl->incomplete();
|
|
1775 return;
|
|
1776 }
|
|
1777 cur = next;
|
|
1778 }
|
|
1779 }
|
|
1780
|
|
1781 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
|
|
1782 return _hrs->length() > 0 ? _hrs->at(0) : NULL;
|
|
1783 }
|
|
1784
|
|
1785
|
|
1786 Space* G1CollectedHeap::space_containing(const void* addr) const {
|
|
1787 Space* res = heap_region_containing(addr);
|
|
1788 if (res == NULL)
|
|
1789 res = perm_gen()->space_containing(addr);
|
|
1790 return res;
|
|
1791 }
|
|
1792
|
|
1793 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
|
|
1794 Space* sp = space_containing(addr);
|
|
1795 if (sp != NULL) {
|
|
1796 return sp->block_start(addr);
|
|
1797 }
|
|
1798 return NULL;
|
|
1799 }
|
|
1800
|
|
1801 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
|
|
1802 Space* sp = space_containing(addr);
|
|
1803 assert(sp != NULL, "block_size of address outside of heap");
|
|
1804 return sp->block_size(addr);
|
|
1805 }
|
|
1806
|
|
1807 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
|
|
1808 Space* sp = space_containing(addr);
|
|
1809 return sp->block_is_obj(addr);
|
|
1810 }
|
|
1811
|
|
1812 bool G1CollectedHeap::supports_tlab_allocation() const {
|
|
1813 return true;
|
|
1814 }
|
|
1815
|
|
1816 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
|
|
1817 return HeapRegion::GrainBytes;
|
|
1818 }
|
|
1819
|
|
1820 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
|
|
1821 // Return the remaining space in the cur alloc region, but not less than
|
|
1822 // the min TLAB size.
|
|
1823 // Also, no more than half the region size, since we can't allow tlabs to
|
|
1824 // grow big enough to accomodate humongous objects.
|
|
1825
|
|
1826 // We need to story it locally, since it might change between when we
|
|
1827 // test for NULL and when we use it later.
|
|
1828 ContiguousSpace* cur_alloc_space = _cur_alloc_region;
|
|
1829 if (cur_alloc_space == NULL) {
|
|
1830 return HeapRegion::GrainBytes/2;
|
|
1831 } else {
|
|
1832 return MAX2(MIN2(cur_alloc_space->free(),
|
|
1833 (size_t)(HeapRegion::GrainBytes/2)),
|
|
1834 (size_t)MinTLABSize);
|
|
1835 }
|
|
1836 }
|
|
1837
|
|
1838 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) {
|
|
1839 bool dummy;
|
|
1840 return G1CollectedHeap::mem_allocate(size, false, true, &dummy);
|
|
1841 }
|
|
1842
|
|
1843 bool G1CollectedHeap::allocs_are_zero_filled() {
|
|
1844 return false;
|
|
1845 }
|
|
1846
|
|
1847 size_t G1CollectedHeap::large_typearray_limit() {
|
|
1848 // FIXME
|
|
1849 return HeapRegion::GrainBytes/HeapWordSize;
|
|
1850 }
|
|
1851
|
|
1852 size_t G1CollectedHeap::max_capacity() const {
|
|
1853 return _g1_committed.byte_size();
|
|
1854 }
|
|
1855
|
|
1856 jlong G1CollectedHeap::millis_since_last_gc() {
|
|
1857 // assert(false, "NYI");
|
|
1858 return 0;
|
|
1859 }
|
|
1860
|
|
1861
|
|
1862 void G1CollectedHeap::prepare_for_verify() {
|
|
1863 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
|
|
1864 ensure_parsability(false);
|
|
1865 }
|
|
1866 g1_rem_set()->prepare_for_verify();
|
|
1867 }
|
|
1868
|
|
1869 class VerifyLivenessOopClosure: public OopClosure {
|
|
1870 G1CollectedHeap* g1h;
|
|
1871 public:
|
|
1872 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
|
|
1873 g1h = _g1h;
|
|
1874 }
|
|
1875 void do_oop(narrowOop *p) {
|
|
1876 guarantee(false, "NYI");
|
|
1877 }
|
|
1878 void do_oop(oop *p) {
|
|
1879 oop obj = *p;
|
|
1880 assert(obj == NULL || !g1h->is_obj_dead(obj),
|
|
1881 "Dead object referenced by a not dead object");
|
|
1882 }
|
|
1883 };
|
|
1884
|
|
1885 class VerifyObjsInRegionClosure: public ObjectClosure {
|
|
1886 G1CollectedHeap* _g1h;
|
|
1887 size_t _live_bytes;
|
|
1888 HeapRegion *_hr;
|
|
1889 public:
|
|
1890 VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) {
|
|
1891 _g1h = G1CollectedHeap::heap();
|
|
1892 }
|
|
1893 void do_object(oop o) {
|
|
1894 VerifyLivenessOopClosure isLive(_g1h);
|
|
1895 assert(o != NULL, "Huh?");
|
|
1896 if (!_g1h->is_obj_dead(o)) {
|
|
1897 o->oop_iterate(&isLive);
|
|
1898 if (!_hr->obj_allocated_since_prev_marking(o))
|
|
1899 _live_bytes += (o->size() * HeapWordSize);
|
|
1900 }
|
|
1901 }
|
|
1902 size_t live_bytes() { return _live_bytes; }
|
|
1903 };
|
|
1904
|
|
1905 class PrintObjsInRegionClosure : public ObjectClosure {
|
|
1906 HeapRegion *_hr;
|
|
1907 G1CollectedHeap *_g1;
|
|
1908 public:
|
|
1909 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
|
|
1910 _g1 = G1CollectedHeap::heap();
|
|
1911 };
|
|
1912
|
|
1913 void do_object(oop o) {
|
|
1914 if (o != NULL) {
|
|
1915 HeapWord *start = (HeapWord *) o;
|
|
1916 size_t word_sz = o->size();
|
|
1917 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
|
|
1918 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
|
|
1919 (void*) o, word_sz,
|
|
1920 _g1->isMarkedPrev(o),
|
|
1921 _g1->isMarkedNext(o),
|
|
1922 _hr->obj_allocated_since_prev_marking(o));
|
|
1923 HeapWord *end = start + word_sz;
|
|
1924 HeapWord *cur;
|
|
1925 int *val;
|
|
1926 for (cur = start; cur < end; cur++) {
|
|
1927 val = (int *) cur;
|
|
1928 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
|
|
1929 }
|
|
1930 }
|
|
1931 }
|
|
1932 };
|
|
1933
|
|
1934 class VerifyRegionClosure: public HeapRegionClosure {
|
|
1935 public:
|
|
1936 bool _allow_dirty;
|
|
1937 VerifyRegionClosure(bool allow_dirty)
|
|
1938 : _allow_dirty(allow_dirty) {}
|
|
1939 bool doHeapRegion(HeapRegion* r) {
|
|
1940 guarantee(r->claim_value() == 0, "Should be unclaimed at verify points.");
|
|
1941 if (r->isHumongous()) {
|
|
1942 if (r->startsHumongous()) {
|
|
1943 // Verify the single H object.
|
|
1944 oop(r->bottom())->verify();
|
|
1945 size_t word_sz = oop(r->bottom())->size();
|
|
1946 guarantee(r->top() == r->bottom() + word_sz,
|
|
1947 "Only one object in a humongous region");
|
|
1948 }
|
|
1949 } else {
|
|
1950 VerifyObjsInRegionClosure not_dead_yet_cl(r);
|
|
1951 r->verify(_allow_dirty);
|
|
1952 r->object_iterate(¬_dead_yet_cl);
|
|
1953 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
|
|
1954 "More live objects than counted in last complete marking.");
|
|
1955 }
|
|
1956 return false;
|
|
1957 }
|
|
1958 };
|
|
1959
|
|
1960 class VerifyRootsClosure: public OopsInGenClosure {
|
|
1961 private:
|
|
1962 G1CollectedHeap* _g1h;
|
|
1963 bool _failures;
|
|
1964
|
|
1965 public:
|
|
1966 VerifyRootsClosure() :
|
|
1967 _g1h(G1CollectedHeap::heap()), _failures(false) { }
|
|
1968
|
|
1969 bool failures() { return _failures; }
|
|
1970
|
|
1971 void do_oop(narrowOop* p) {
|
|
1972 guarantee(false, "NYI");
|
|
1973 }
|
|
1974
|
|
1975 void do_oop(oop* p) {
|
|
1976 oop obj = *p;
|
|
1977 if (obj != NULL) {
|
|
1978 if (_g1h->is_obj_dead(obj)) {
|
|
1979 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
|
|
1980 "points to dead obj "PTR_FORMAT, p, (void*) obj);
|
|
1981 obj->print_on(gclog_or_tty);
|
|
1982 _failures = true;
|
|
1983 }
|
|
1984 }
|
|
1985 }
|
|
1986 };
|
|
1987
|
|
1988 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
|
|
1989 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
|
|
1990 if (!silent) { gclog_or_tty->print("roots "); }
|
|
1991 VerifyRootsClosure rootsCl;
|
|
1992 process_strong_roots(false,
|
|
1993 SharedHeap::SO_AllClasses,
|
|
1994 &rootsCl,
|
|
1995 &rootsCl);
|
|
1996 rem_set()->invalidate(perm_gen()->used_region(), false);
|
|
1997 if (!silent) { gclog_or_tty->print("heapRegions "); }
|
|
1998 VerifyRegionClosure blk(allow_dirty);
|
|
1999 _hrs->iterate(&blk);
|
|
2000 if (!silent) gclog_or_tty->print("remset ");
|
|
2001 rem_set()->verify();
|
|
2002 guarantee(!rootsCl.failures(), "should not have had failures");
|
|
2003 } else {
|
|
2004 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
|
|
2005 }
|
|
2006 }
|
|
2007
|
|
2008 class PrintRegionClosure: public HeapRegionClosure {
|
|
2009 outputStream* _st;
|
|
2010 public:
|
|
2011 PrintRegionClosure(outputStream* st) : _st(st) {}
|
|
2012 bool doHeapRegion(HeapRegion* r) {
|
|
2013 r->print_on(_st);
|
|
2014 return false;
|
|
2015 }
|
|
2016 };
|
|
2017
|
|
2018 void G1CollectedHeap::print() const { print_on(gclog_or_tty); }
|
|
2019
|
|
2020 void G1CollectedHeap::print_on(outputStream* st) const {
|
|
2021 PrintRegionClosure blk(st);
|
|
2022 _hrs->iterate(&blk);
|
|
2023 }
|
|
2024
|
|
2025 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
|
|
2026 if (ParallelGCThreads > 0) {
|
|
2027 workers()->print_worker_threads();
|
|
2028 }
|
|
2029 st->print("\"G1 concurrent mark GC Thread\" ");
|
|
2030 _cmThread->print();
|
|
2031 st->cr();
|
|
2032 st->print("\"G1 concurrent refinement GC Thread\" ");
|
|
2033 _cg1r->cg1rThread()->print_on(st);
|
|
2034 st->cr();
|
|
2035 st->print("\"G1 zero-fill GC Thread\" ");
|
|
2036 _czft->print_on(st);
|
|
2037 st->cr();
|
|
2038 }
|
|
2039
|
|
2040 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
|
|
2041 if (ParallelGCThreads > 0) {
|
|
2042 workers()->threads_do(tc);
|
|
2043 }
|
|
2044 tc->do_thread(_cmThread);
|
|
2045 tc->do_thread(_cg1r->cg1rThread());
|
|
2046 tc->do_thread(_czft);
|
|
2047 }
|
|
2048
|
|
2049 void G1CollectedHeap::print_tracing_info() const {
|
|
2050 concurrent_g1_refine()->print_final_card_counts();
|
|
2051
|
|
2052 // We'll overload this to mean "trace GC pause statistics."
|
|
2053 if (TraceGen0Time || TraceGen1Time) {
|
|
2054 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
|
|
2055 // to that.
|
|
2056 g1_policy()->print_tracing_info();
|
|
2057 }
|
|
2058 if (SummarizeG1RSStats) {
|
|
2059 g1_rem_set()->print_summary_info();
|
|
2060 }
|
|
2061 if (SummarizeG1ConcMark) {
|
|
2062 concurrent_mark()->print_summary_info();
|
|
2063 }
|
|
2064 if (SummarizeG1ZFStats) {
|
|
2065 ConcurrentZFThread::print_summary_info();
|
|
2066 }
|
|
2067 if (G1SummarizePopularity) {
|
|
2068 print_popularity_summary_info();
|
|
2069 }
|
|
2070 g1_policy()->print_yg_surv_rate_info();
|
|
2071
|
|
2072 GCOverheadReporter::printGCOverhead();
|
|
2073
|
|
2074 SpecializationStats::print();
|
|
2075 }
|
|
2076
|
|
2077
|
|
2078 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
|
|
2079 HeapRegion* hr = heap_region_containing(addr);
|
|
2080 if (hr == NULL) {
|
|
2081 return 0;
|
|
2082 } else {
|
|
2083 return 1;
|
|
2084 }
|
|
2085 }
|
|
2086
|
|
2087 G1CollectedHeap* G1CollectedHeap::heap() {
|
|
2088 assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
|
|
2089 "not a garbage-first heap");
|
|
2090 return _g1h;
|
|
2091 }
|
|
2092
|
|
2093 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
|
|
2094 if (PrintHeapAtGC){
|
|
2095 gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections());
|
|
2096 Universe::print();
|
|
2097 }
|
|
2098 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
|
|
2099 // Call allocation profiler
|
|
2100 AllocationProfiler::iterate_since_last_gc();
|
|
2101 // Fill TLAB's and such
|
|
2102 ensure_parsability(true);
|
|
2103 }
|
|
2104
|
|
2105 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
|
|
2106 // FIXME: what is this about?
|
|
2107 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
|
|
2108 // is set.
|
|
2109 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
|
|
2110 "derived pointer present"));
|
|
2111
|
|
2112 if (PrintHeapAtGC){
|
|
2113 gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections());
|
|
2114 Universe::print();
|
|
2115 gclog_or_tty->print("} ");
|
|
2116 }
|
|
2117 }
|
|
2118
|
|
2119 void G1CollectedHeap::do_collection_pause() {
|
|
2120 // Read the GC count while holding the Heap_lock
|
|
2121 // we need to do this _before_ wait_for_cleanup_complete(), to
|
|
2122 // ensure that we do not give up the heap lock and potentially
|
|
2123 // pick up the wrong count
|
|
2124 int gc_count_before = SharedHeap::heap()->total_collections();
|
|
2125
|
|
2126 // Don't want to do a GC pause while cleanup is being completed!
|
|
2127 wait_for_cleanup_complete();
|
|
2128
|
|
2129 g1_policy()->record_stop_world_start();
|
|
2130 {
|
|
2131 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
|
|
2132 VM_G1IncCollectionPause op(gc_count_before);
|
|
2133 VMThread::execute(&op);
|
|
2134 }
|
|
2135 }
|
|
2136
|
|
2137 void
|
|
2138 G1CollectedHeap::doConcurrentMark() {
|
|
2139 if (G1ConcMark) {
|
|
2140 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
|
|
2141 if (!_cmThread->in_progress()) {
|
|
2142 _cmThread->set_started();
|
|
2143 CGC_lock->notify();
|
|
2144 }
|
|
2145 }
|
|
2146 }
|
|
2147
|
|
2148 class VerifyMarkedObjsClosure: public ObjectClosure {
|
|
2149 G1CollectedHeap* _g1h;
|
|
2150 public:
|
|
2151 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
|
|
2152 void do_object(oop obj) {
|
|
2153 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
|
|
2154 "markandsweep mark should agree with concurrent deadness");
|
|
2155 }
|
|
2156 };
|
|
2157
|
|
2158 void
|
|
2159 G1CollectedHeap::checkConcurrentMark() {
|
|
2160 VerifyMarkedObjsClosure verifycl(this);
|
|
2161 doConcurrentMark();
|
|
2162 // MutexLockerEx x(getMarkBitMapLock(),
|
|
2163 // Mutex::_no_safepoint_check_flag);
|
|
2164 object_iterate(&verifycl);
|
|
2165 }
|
|
2166
|
|
2167 void G1CollectedHeap::do_sync_mark() {
|
|
2168 _cm->checkpointRootsInitial();
|
|
2169 _cm->markFromRoots();
|
|
2170 _cm->checkpointRootsFinal(false);
|
|
2171 }
|
|
2172
|
|
2173 // <NEW PREDICTION>
|
|
2174
|
|
2175 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
|
|
2176 bool young) {
|
|
2177 return _g1_policy->predict_region_elapsed_time_ms(hr, young);
|
|
2178 }
|
|
2179
|
|
2180 void G1CollectedHeap::check_if_region_is_too_expensive(double
|
|
2181 predicted_time_ms) {
|
|
2182 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
|
|
2183 }
|
|
2184
|
|
2185 size_t G1CollectedHeap::pending_card_num() {
|
|
2186 size_t extra_cards = 0;
|
|
2187 JavaThread *curr = Threads::first();
|
|
2188 while (curr != NULL) {
|
|
2189 DirtyCardQueue& dcq = curr->dirty_card_queue();
|
|
2190 extra_cards += dcq.size();
|
|
2191 curr = curr->next();
|
|
2192 }
|
|
2193 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
|
|
2194 size_t buffer_size = dcqs.buffer_size();
|
|
2195 size_t buffer_num = dcqs.completed_buffers_num();
|
|
2196 return buffer_size * buffer_num + extra_cards;
|
|
2197 }
|
|
2198
|
|
2199 size_t G1CollectedHeap::max_pending_card_num() {
|
|
2200 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
|
|
2201 size_t buffer_size = dcqs.buffer_size();
|
|
2202 size_t buffer_num = dcqs.completed_buffers_num();
|
|
2203 int thread_num = Threads::number_of_threads();
|
|
2204 return (buffer_num + thread_num) * buffer_size;
|
|
2205 }
|
|
2206
|
|
2207 size_t G1CollectedHeap::cards_scanned() {
|
|
2208 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
|
|
2209 return g1_rset->cardsScanned();
|
|
2210 }
|
|
2211
|
|
2212 void
|
|
2213 G1CollectedHeap::setup_surviving_young_words() {
|
|
2214 guarantee( _surviving_young_words == NULL, "pre-condition" );
|
|
2215 size_t array_length = g1_policy()->young_cset_length();
|
|
2216 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
|
|
2217 if (_surviving_young_words == NULL) {
|
|
2218 vm_exit_out_of_memory(sizeof(size_t) * array_length,
|
|
2219 "Not enough space for young surv words summary.");
|
|
2220 }
|
|
2221 memset(_surviving_young_words, 0, array_length * sizeof(size_t));
|
|
2222 for (size_t i = 0; i < array_length; ++i) {
|
|
2223 guarantee( _surviving_young_words[i] == 0, "invariant" );
|
|
2224 }
|
|
2225 }
|
|
2226
|
|
2227 void
|
|
2228 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
|
|
2229 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
|
|
2230 size_t array_length = g1_policy()->young_cset_length();
|
|
2231 for (size_t i = 0; i < array_length; ++i)
|
|
2232 _surviving_young_words[i] += surv_young_words[i];
|
|
2233 }
|
|
2234
|
|
2235 void
|
|
2236 G1CollectedHeap::cleanup_surviving_young_words() {
|
|
2237 guarantee( _surviving_young_words != NULL, "pre-condition" );
|
|
2238 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
|
|
2239 _surviving_young_words = NULL;
|
|
2240 }
|
|
2241
|
|
2242 // </NEW PREDICTION>
|
|
2243
|
|
2244 void
|
|
2245 G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
|
|
2246 char verbose_str[128];
|
|
2247 sprintf(verbose_str, "GC pause ");
|
|
2248 if (popular_region != NULL)
|
|
2249 strcat(verbose_str, "(popular)");
|
|
2250 else if (g1_policy()->in_young_gc_mode()) {
|
|
2251 if (g1_policy()->full_young_gcs())
|
|
2252 strcat(verbose_str, "(young)");
|
|
2253 else
|
|
2254 strcat(verbose_str, "(partial)");
|
|
2255 }
|
|
2256 bool reset_should_initiate_conc_mark = false;
|
|
2257 if (popular_region != NULL && g1_policy()->should_initiate_conc_mark()) {
|
|
2258 // we currently do not allow an initial mark phase to be piggy-backed
|
|
2259 // on a popular pause
|
|
2260 reset_should_initiate_conc_mark = true;
|
|
2261 g1_policy()->unset_should_initiate_conc_mark();
|
|
2262 }
|
|
2263 if (g1_policy()->should_initiate_conc_mark())
|
|
2264 strcat(verbose_str, " (initial-mark)");
|
|
2265
|
|
2266 GCCauseSetter x(this, (popular_region == NULL ?
|
|
2267 GCCause::_g1_inc_collection_pause :
|
|
2268 GCCause::_g1_pop_region_collection_pause));
|
|
2269
|
|
2270 // if PrintGCDetails is on, we'll print long statistics information
|
|
2271 // in the collector policy code, so let's not print this as the output
|
|
2272 // is messy if we do.
|
|
2273 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
|
|
2274 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
|
2275 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
|
|
2276
|
|
2277 ResourceMark rm;
|
|
2278 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
|
|
2279 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
|
|
2280 guarantee(!is_gc_active(), "collection is not reentrant");
|
|
2281 assert(regions_accounted_for(), "Region leakage!");
|
|
2282 ++_gc_time_stamp;
|
|
2283
|
|
2284 if (g1_policy()->in_young_gc_mode()) {
|
|
2285 assert(check_young_list_well_formed(),
|
|
2286 "young list should be well formed");
|
|
2287 }
|
|
2288
|
|
2289 if (GC_locker::is_active()) {
|
|
2290 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
|
|
2291 }
|
|
2292
|
|
2293 bool abandoned = false;
|
|
2294 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
|
|
2295 IsGCActiveMark x;
|
|
2296
|
|
2297 gc_prologue(false);
|
|
2298 increment_total_collections();
|
|
2299
|
|
2300 #if G1_REM_SET_LOGGING
|
|
2301 gclog_or_tty->print_cr("\nJust chose CS, heap:");
|
|
2302 print();
|
|
2303 #endif
|
|
2304
|
|
2305 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
|
|
2306 HandleMark hm; // Discard invalid handles created during verification
|
|
2307 prepare_for_verify();
|
|
2308 gclog_or_tty->print(" VerifyBeforeGC:");
|
|
2309 Universe::verify(false);
|
|
2310 }
|
|
2311
|
|
2312 COMPILER2_PRESENT(DerivedPointerTable::clear());
|
|
2313
|
|
2314 // We want to turn off ref discovere, if necessary, and turn it back on
|
|
2315 // on again later if we do.
|
|
2316 bool was_enabled = ref_processor()->discovery_enabled();
|
|
2317 if (was_enabled) ref_processor()->disable_discovery();
|
|
2318
|
|
2319 // Forget the current alloc region (we might even choose it to be part
|
|
2320 // of the collection set!).
|
|
2321 abandon_cur_alloc_region();
|
|
2322
|
|
2323 // The elapsed time induced by the start time below deliberately elides
|
|
2324 // the possible verification above.
|
|
2325 double start_time_sec = os::elapsedTime();
|
|
2326 GCOverheadReporter::recordSTWStart(start_time_sec);
|
|
2327 size_t start_used_bytes = used();
|
|
2328 if (!G1ConcMark) {
|
|
2329 do_sync_mark();
|
|
2330 }
|
|
2331
|
|
2332 g1_policy()->record_collection_pause_start(start_time_sec,
|
|
2333 start_used_bytes);
|
|
2334
|
|
2335 #if SCAN_ONLY_VERBOSE
|
|
2336 _young_list->print();
|
|
2337 #endif // SCAN_ONLY_VERBOSE
|
|
2338
|
|
2339 if (g1_policy()->should_initiate_conc_mark()) {
|
|
2340 concurrent_mark()->checkpointRootsInitialPre();
|
|
2341 }
|
|
2342 save_marks();
|
|
2343
|
|
2344 // We must do this before any possible evacuation that should propogate
|
|
2345 // marks, including evacuation of popular objects in a popular pause.
|
|
2346 if (mark_in_progress()) {
|
|
2347 double start_time_sec = os::elapsedTime();
|
|
2348
|
|
2349 _cm->drainAllSATBBuffers();
|
|
2350 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
|
|
2351 g1_policy()->record_satb_drain_time(finish_mark_ms);
|
|
2352
|
|
2353 }
|
|
2354 // Record the number of elements currently on the mark stack, so we
|
|
2355 // only iterate over these. (Since evacuation may add to the mark
|
|
2356 // stack, doing more exposes race conditions.) If no mark is in
|
|
2357 // progress, this will be zero.
|
|
2358 _cm->set_oops_do_bound();
|
|
2359
|
|
2360 assert(regions_accounted_for(), "Region leakage.");
|
|
2361
|
|
2362 bool abandoned = false;
|
|
2363
|
|
2364 if (mark_in_progress())
|
|
2365 concurrent_mark()->newCSet();
|
|
2366
|
|
2367 // Now choose the CS.
|
|
2368 if (popular_region == NULL) {
|
|
2369 g1_policy()->choose_collection_set();
|
|
2370 } else {
|
|
2371 // We may be evacuating a single region (for popularity).
|
|
2372 g1_policy()->record_popular_pause_preamble_start();
|
|
2373 popularity_pause_preamble(popular_region);
|
|
2374 g1_policy()->record_popular_pause_preamble_end();
|
|
2375 abandoned = (g1_policy()->collection_set() == NULL);
|
|
2376 // Now we allow more regions to be added (we have to collect
|
|
2377 // all popular regions).
|
|
2378 if (!abandoned) {
|
|
2379 g1_policy()->choose_collection_set(popular_region);
|
|
2380 }
|
|
2381 }
|
|
2382 // We may abandon a pause if we find no region that will fit in the MMU
|
|
2383 // pause.
|
|
2384 abandoned = (g1_policy()->collection_set() == NULL);
|
|
2385
|
|
2386 // Nothing to do if we were unable to choose a collection set.
|
|
2387 if (!abandoned) {
|
|
2388 #if G1_REM_SET_LOGGING
|
|
2389 gclog_or_tty->print_cr("\nAfter pause, heap:");
|
|
2390 print();
|
|
2391 #endif
|
|
2392
|
|
2393 setup_surviving_young_words();
|
|
2394
|
|
2395 // Set up the gc allocation regions.
|
|
2396 get_gc_alloc_regions();
|
|
2397
|
|
2398 // Actually do the work...
|
|
2399 evacuate_collection_set();
|
|
2400 free_collection_set(g1_policy()->collection_set());
|
|
2401 g1_policy()->clear_collection_set();
|
|
2402
|
|
2403 if (popular_region != NULL) {
|
|
2404 // We have to wait until now, because we don't want the region to
|
|
2405 // be rescheduled for pop-evac during RS update.
|
|
2406 popular_region->set_popular_pending(false);
|
|
2407 }
|
|
2408
|
|
2409 release_gc_alloc_regions();
|
|
2410
|
|
2411 cleanup_surviving_young_words();
|
|
2412
|
|
2413 if (g1_policy()->in_young_gc_mode()) {
|
|
2414 _young_list->reset_sampled_info();
|
|
2415 assert(check_young_list_empty(true),
|
|
2416 "young list should be empty");
|
|
2417
|
|
2418 #if SCAN_ONLY_VERBOSE
|
|
2419 _young_list->print();
|
|
2420 #endif // SCAN_ONLY_VERBOSE
|
|
2421
|
|
2422 _young_list->reset_auxilary_lists();
|
|
2423 }
|
|
2424 } else {
|
|
2425 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
|
|
2426 }
|
|
2427
|
|
2428 if (evacuation_failed()) {
|
|
2429 _summary_bytes_used = recalculate_used();
|
|
2430 } else {
|
|
2431 // The "used" of the the collection set have already been subtracted
|
|
2432 // when they were freed. Add in the bytes evacuated.
|
|
2433 _summary_bytes_used += g1_policy()->bytes_in_to_space();
|
|
2434 }
|
|
2435
|
|
2436 if (g1_policy()->in_young_gc_mode() &&
|
|
2437 g1_policy()->should_initiate_conc_mark()) {
|
|
2438 concurrent_mark()->checkpointRootsInitialPost();
|
|
2439 set_marking_started();
|
|
2440 doConcurrentMark();
|
|
2441 }
|
|
2442
|
|
2443 #if SCAN_ONLY_VERBOSE
|
|
2444 _young_list->print();
|
|
2445 #endif // SCAN_ONLY_VERBOSE
|
|
2446
|
|
2447 double end_time_sec = os::elapsedTime();
|
|
2448 g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
|
|
2449 GCOverheadReporter::recordSTWEnd(end_time_sec);
|
|
2450 g1_policy()->record_collection_pause_end(popular_region != NULL,
|
|
2451 abandoned);
|
|
2452
|
|
2453 assert(regions_accounted_for(), "Region leakage.");
|
|
2454
|
|
2455 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
|
|
2456 HandleMark hm; // Discard invalid handles created during verification
|
|
2457 gclog_or_tty->print(" VerifyAfterGC:");
|
|
2458 Universe::verify(false);
|
|
2459 }
|
|
2460
|
|
2461 if (was_enabled) ref_processor()->enable_discovery();
|
|
2462
|
|
2463 {
|
|
2464 size_t expand_bytes = g1_policy()->expansion_amount();
|
|
2465 if (expand_bytes > 0) {
|
|
2466 size_t bytes_before = capacity();
|
|
2467 expand(expand_bytes);
|
|
2468 }
|
|
2469 }
|
|
2470
|
|
2471 if (mark_in_progress())
|
|
2472 concurrent_mark()->update_g1_committed();
|
|
2473
|
|
2474 gc_epilogue(false);
|
|
2475 }
|
|
2476
|
|
2477 assert(verify_region_lists(), "Bad region lists.");
|
|
2478
|
|
2479 if (reset_should_initiate_conc_mark)
|
|
2480 g1_policy()->set_should_initiate_conc_mark();
|
|
2481
|
|
2482 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
|
|
2483 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
|
|
2484 print_tracing_info();
|
|
2485 vm_exit(-1);
|
|
2486 }
|
|
2487 }
|
|
2488
|
|
2489 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
|
|
2490 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
|
|
2491 HeapWord* original_top = NULL;
|
|
2492 if (r != NULL)
|
|
2493 original_top = r->top();
|
|
2494
|
|
2495 // We will want to record the used space in r as being there before gc.
|
|
2496 // One we install it as a GC alloc region it's eligible for allocation.
|
|
2497 // So record it now and use it later.
|
|
2498 size_t r_used = 0;
|
|
2499 if (r != NULL) {
|
|
2500 r_used = r->used();
|
|
2501
|
|
2502 if (ParallelGCThreads > 0) {
|
|
2503 // need to take the lock to guard against two threads calling
|
|
2504 // get_gc_alloc_region concurrently (very unlikely but...)
|
|
2505 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
|
|
2506 r->save_marks();
|
|
2507 }
|
|
2508 }
|
|
2509 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
|
|
2510 _gc_alloc_regions[purpose] = r;
|
|
2511 if (old_alloc_region != NULL) {
|
|
2512 // Replace aliases too.
|
|
2513 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
|
2514 if (_gc_alloc_regions[ap] == old_alloc_region) {
|
|
2515 _gc_alloc_regions[ap] = r;
|
|
2516 }
|
|
2517 }
|
|
2518 }
|
|
2519 if (r != NULL) {
|
|
2520 push_gc_alloc_region(r);
|
|
2521 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
|
|
2522 // We are using a region as a GC alloc region after it has been used
|
|
2523 // as a mutator allocation region during the current marking cycle.
|
|
2524 // The mutator-allocated objects are currently implicitly marked, but
|
|
2525 // when we move hr->next_top_at_mark_start() forward at the the end
|
|
2526 // of the GC pause, they won't be. We therefore mark all objects in
|
|
2527 // the "gap". We do this object-by-object, since marking densely
|
|
2528 // does not currently work right with marking bitmap iteration. This
|
|
2529 // means we rely on TLAB filling at the start of pauses, and no
|
|
2530 // "resuscitation" of filled TLAB's. If we want to do this, we need
|
|
2531 // to fix the marking bitmap iteration.
|
|
2532 HeapWord* curhw = r->next_top_at_mark_start();
|
|
2533 HeapWord* t = original_top;
|
|
2534
|
|
2535 while (curhw < t) {
|
|
2536 oop cur = (oop)curhw;
|
|
2537 // We'll assume parallel for generality. This is rare code.
|
|
2538 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
|
|
2539 curhw = curhw + cur->size();
|
|
2540 }
|
|
2541 assert(curhw == t, "Should have parsed correctly.");
|
|
2542 }
|
|
2543 if (G1PolicyVerbose > 1) {
|
|
2544 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
|
|
2545 "for survivors:", r->bottom(), original_top, r->end());
|
|
2546 r->print();
|
|
2547 }
|
|
2548 g1_policy()->record_before_bytes(r_used);
|
|
2549 }
|
|
2550 }
|
|
2551
|
|
2552 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
|
|
2553 assert(Thread::current()->is_VM_thread() ||
|
|
2554 par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
|
|
2555 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
|
|
2556 "Precondition.");
|
|
2557 hr->set_is_gc_alloc_region(true);
|
|
2558 hr->set_next_gc_alloc_region(_gc_alloc_region_list);
|
|
2559 _gc_alloc_region_list = hr;
|
|
2560 }
|
|
2561
|
|
2562 #ifdef G1_DEBUG
|
|
2563 class FindGCAllocRegion: public HeapRegionClosure {
|
|
2564 public:
|
|
2565 bool doHeapRegion(HeapRegion* r) {
|
|
2566 if (r->is_gc_alloc_region()) {
|
|
2567 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
|
|
2568 r->hrs_index(), r->bottom());
|
|
2569 }
|
|
2570 return false;
|
|
2571 }
|
|
2572 };
|
|
2573 #endif // G1_DEBUG
|
|
2574
|
|
2575 void G1CollectedHeap::forget_alloc_region_list() {
|
|
2576 assert(Thread::current()->is_VM_thread(), "Precondition");
|
|
2577 while (_gc_alloc_region_list != NULL) {
|
|
2578 HeapRegion* r = _gc_alloc_region_list;
|
|
2579 assert(r->is_gc_alloc_region(), "Invariant.");
|
|
2580 _gc_alloc_region_list = r->next_gc_alloc_region();
|
|
2581 r->set_next_gc_alloc_region(NULL);
|
|
2582 r->set_is_gc_alloc_region(false);
|
|
2583 if (r->is_empty()) {
|
|
2584 ++_free_regions;
|
|
2585 }
|
|
2586 }
|
|
2587 #ifdef G1_DEBUG
|
|
2588 FindGCAllocRegion fa;
|
|
2589 heap_region_iterate(&fa);
|
|
2590 #endif // G1_DEBUG
|
|
2591 }
|
|
2592
|
|
2593
|
|
2594 bool G1CollectedHeap::check_gc_alloc_regions() {
|
|
2595 // TODO: allocation regions check
|
|
2596 return true;
|
|
2597 }
|
|
2598
|
|
2599 void G1CollectedHeap::get_gc_alloc_regions() {
|
|
2600 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
|
2601 // Create new GC alloc regions.
|
|
2602 HeapRegion* alloc_region = _gc_alloc_regions[ap];
|
|
2603 // Clear this alloc region, so that in case it turns out to be
|
|
2604 // unacceptable, we end up with no allocation region, rather than a bad
|
|
2605 // one.
|
|
2606 _gc_alloc_regions[ap] = NULL;
|
|
2607 if (alloc_region == NULL || alloc_region->in_collection_set()) {
|
|
2608 // Can't re-use old one. Allocate a new one.
|
|
2609 alloc_region = newAllocRegionWithExpansion(ap, 0);
|
|
2610 }
|
|
2611 if (alloc_region != NULL) {
|
|
2612 set_gc_alloc_region(ap, alloc_region);
|
|
2613 }
|
|
2614 }
|
|
2615 // Set alternative regions for allocation purposes that have reached
|
|
2616 // thier limit.
|
|
2617 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
|
2618 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
|
|
2619 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
|
|
2620 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
|
|
2621 }
|
|
2622 }
|
|
2623 assert(check_gc_alloc_regions(), "alloc regions messed up");
|
|
2624 }
|
|
2625
|
|
2626 void G1CollectedHeap::release_gc_alloc_regions() {
|
|
2627 // We keep a separate list of all regions that have been alloc regions in
|
|
2628 // the current collection pause. Forget that now.
|
|
2629 forget_alloc_region_list();
|
|
2630
|
|
2631 // The current alloc regions contain objs that have survived
|
|
2632 // collection. Make them no longer GC alloc regions.
|
|
2633 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
|
2634 HeapRegion* r = _gc_alloc_regions[ap];
|
|
2635 if (r != NULL && r->is_empty()) {
|
|
2636 {
|
|
2637 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
|
|
2638 r->set_zero_fill_complete();
|
|
2639 put_free_region_on_list_locked(r);
|
|
2640 }
|
|
2641 }
|
|
2642 // set_gc_alloc_region will also NULLify all aliases to the region
|
|
2643 set_gc_alloc_region(ap, NULL);
|
|
2644 _gc_alloc_region_counts[ap] = 0;
|
|
2645 }
|
|
2646 }
|
|
2647
|
|
2648 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
|
|
2649 _drain_in_progress = false;
|
|
2650 set_evac_failure_closure(cl);
|
|
2651 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
|
|
2652 }
|
|
2653
|
|
2654 void G1CollectedHeap::finalize_for_evac_failure() {
|
|
2655 assert(_evac_failure_scan_stack != NULL &&
|
|
2656 _evac_failure_scan_stack->length() == 0,
|
|
2657 "Postcondition");
|
|
2658 assert(!_drain_in_progress, "Postcondition");
|
|
2659 // Don't have to delete, since the scan stack is a resource object.
|
|
2660 _evac_failure_scan_stack = NULL;
|
|
2661 }
|
|
2662
|
|
2663
|
|
2664
|
|
2665 // *** Sequential G1 Evacuation
|
|
2666
|
|
2667 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
|
|
2668 HeapRegion* alloc_region = _gc_alloc_regions[purpose];
|
|
2669 // let the caller handle alloc failure
|
|
2670 if (alloc_region == NULL) return NULL;
|
|
2671 assert(isHumongous(word_size) || !alloc_region->isHumongous(),
|
|
2672 "Either the object is humongous or the region isn't");
|
|
2673 HeapWord* block = alloc_region->allocate(word_size);
|
|
2674 if (block == NULL) {
|
|
2675 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
|
|
2676 }
|
|
2677 return block;
|
|
2678 }
|
|
2679
|
|
2680 class G1IsAliveClosure: public BoolObjectClosure {
|
|
2681 G1CollectedHeap* _g1;
|
|
2682 public:
|
|
2683 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
|
|
2684 void do_object(oop p) { assert(false, "Do not call."); }
|
|
2685 bool do_object_b(oop p) {
|
|
2686 // It is reachable if it is outside the collection set, or is inside
|
|
2687 // and forwarded.
|
|
2688
|
|
2689 #ifdef G1_DEBUG
|
|
2690 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
|
|
2691 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
|
|
2692 !_g1->obj_in_cs(p) || p->is_forwarded());
|
|
2693 #endif // G1_DEBUG
|
|
2694
|
|
2695 return !_g1->obj_in_cs(p) || p->is_forwarded();
|
|
2696 }
|
|
2697 };
|
|
2698
|
|
2699 class G1KeepAliveClosure: public OopClosure {
|
|
2700 G1CollectedHeap* _g1;
|
|
2701 public:
|
|
2702 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
|
|
2703 void do_oop(narrowOop* p) {
|
|
2704 guarantee(false, "NYI");
|
|
2705 }
|
|
2706 void do_oop(oop* p) {
|
|
2707 oop obj = *p;
|
|
2708 #ifdef G1_DEBUG
|
|
2709 if (PrintGC && Verbose) {
|
|
2710 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
|
|
2711 p, (void*) obj, (void*) *p);
|
|
2712 }
|
|
2713 #endif // G1_DEBUG
|
|
2714
|
|
2715 if (_g1->obj_in_cs(obj)) {
|
|
2716 assert( obj->is_forwarded(), "invariant" );
|
|
2717 *p = obj->forwardee();
|
|
2718
|
|
2719 #ifdef G1_DEBUG
|
|
2720 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
|
|
2721 (void*) obj, (void*) *p);
|
|
2722 #endif // G1_DEBUG
|
|
2723 }
|
|
2724 }
|
|
2725 };
|
|
2726
|
|
2727 class RecreateRSetEntriesClosure: public OopClosure {
|
|
2728 private:
|
|
2729 G1CollectedHeap* _g1;
|
|
2730 G1RemSet* _g1_rem_set;
|
|
2731 HeapRegion* _from;
|
|
2732 public:
|
|
2733 RecreateRSetEntriesClosure(G1CollectedHeap* g1, HeapRegion* from) :
|
|
2734 _g1(g1), _g1_rem_set(g1->g1_rem_set()), _from(from)
|
|
2735 {}
|
|
2736
|
|
2737 void do_oop(narrowOop* p) {
|
|
2738 guarantee(false, "NYI");
|
|
2739 }
|
|
2740 void do_oop(oop* p) {
|
|
2741 assert(_from->is_in_reserved(p), "paranoia");
|
|
2742 if (*p != NULL) {
|
|
2743 _g1_rem_set->write_ref(_from, p);
|
|
2744 }
|
|
2745 }
|
|
2746 };
|
|
2747
|
|
2748 class RemoveSelfPointerClosure: public ObjectClosure {
|
|
2749 private:
|
|
2750 G1CollectedHeap* _g1;
|
|
2751 ConcurrentMark* _cm;
|
|
2752 HeapRegion* _hr;
|
|
2753 HeapWord* _last_self_forwarded_end;
|
|
2754 size_t _prev_marked_bytes;
|
|
2755 size_t _next_marked_bytes;
|
|
2756 public:
|
|
2757 RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr) :
|
|
2758 _g1(g1), _cm(_g1->concurrent_mark()), _hr(hr),
|
|
2759 _last_self_forwarded_end(_hr->bottom()),
|
|
2760 _prev_marked_bytes(0), _next_marked_bytes(0)
|
|
2761 {}
|
|
2762
|
|
2763 size_t prev_marked_bytes() { return _prev_marked_bytes; }
|
|
2764 size_t next_marked_bytes() { return _next_marked_bytes; }
|
|
2765
|
|
2766 void fill_remainder() {
|
|
2767 HeapWord* limit = _hr->top();
|
|
2768 MemRegion mr(_last_self_forwarded_end, limit);
|
|
2769 if (!mr.is_empty()) {
|
|
2770 SharedHeap::fill_region_with_object(mr);
|
|
2771 _cm->clearRangeBothMaps(mr);
|
|
2772 _hr->declare_filled_region_to_BOT(mr);
|
|
2773 }
|
|
2774 }
|
|
2775
|
|
2776 void do_object(oop obj) {
|
|
2777 if (obj->is_forwarded()) {
|
|
2778 if (obj->forwardee() == obj) {
|
|
2779 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
|
|
2780 _cm->markPrev(obj);
|
|
2781 assert(_cm->isPrevMarked(obj), "Should be marked!");
|
|
2782 _prev_marked_bytes += (obj->size() * HeapWordSize);
|
|
2783 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
|
|
2784 _cm->markAndGrayObjectIfNecessary(obj);
|
|
2785 }
|
|
2786 HeapWord* obj_start = (HeapWord*)obj;
|
|
2787 if (obj_start > _last_self_forwarded_end) {
|
|
2788 MemRegion mr(_last_self_forwarded_end, obj_start);
|
|
2789 SharedHeap::fill_region_with_object(mr);
|
|
2790 assert(_cm->isPrevMarked(obj), "Should be marked!");
|
|
2791 _cm->clearRangeBothMaps(mr);
|
|
2792 assert(_cm->isPrevMarked(obj), "Should be marked!");
|
|
2793 _hr->declare_filled_region_to_BOT(mr);
|
|
2794 }
|
|
2795 _last_self_forwarded_end = obj_start + obj->size();
|
|
2796 obj->set_mark(markOopDesc::prototype());
|
|
2797
|
|
2798 // While we were processing RSet buffers during the
|
|
2799 // collection, we actually didn't scan any cards on the
|
|
2800 // collection set, since we didn't want to update remebered
|
|
2801 // sets with entries that point into the collection set, given
|
|
2802 // that live objects fromthe collection set are about to move
|
|
2803 // and such entries will be stale very soon. This change also
|
|
2804 // dealt with a reliability issue which involved scanning a
|
|
2805 // card in the collection set and coming across an array that
|
|
2806 // was being chunked and looking malformed. The problem is
|
|
2807 // that, if evacuation fails, we might have remembered set
|
|
2808 // entries missing given that we skipped cards on the
|
|
2809 // collection set. So, we'll recreate such entries now.
|
|
2810 RecreateRSetEntriesClosure cl(_g1, _hr);
|
|
2811 obj->oop_iterate(&cl);
|
|
2812
|
|
2813 assert(_cm->isPrevMarked(obj), "Should be marked!");
|
|
2814 }
|
|
2815 }
|
|
2816 }
|
|
2817 };
|
|
2818
|
|
2819 void G1CollectedHeap::remove_self_forwarding_pointers() {
|
|
2820 HeapRegion* cur = g1_policy()->collection_set();
|
|
2821
|
|
2822 while (cur != NULL) {
|
|
2823 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
|
|
2824
|
|
2825 if (cur->evacuation_failed()) {
|
|
2826 RemoveSelfPointerClosure rspc(_g1h, cur);
|
|
2827 assert(cur->in_collection_set(), "bad CS");
|
|
2828 cur->object_iterate(&rspc);
|
|
2829 rspc.fill_remainder();
|
|
2830
|
|
2831 // A number of manipulations to make the TAMS be the current top,
|
|
2832 // and the marked bytes be the ones observed in the iteration.
|
|
2833 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
|
|
2834 // The comments below are the postconditions achieved by the
|
|
2835 // calls. Note especially the last such condition, which says that
|
|
2836 // the count of marked bytes has been properly restored.
|
|
2837 cur->note_start_of_marking(false);
|
|
2838 // _next_top_at_mark_start == top, _next_marked_bytes == 0
|
|
2839 cur->add_to_marked_bytes(rspc.prev_marked_bytes());
|
|
2840 // _next_marked_bytes == prev_marked_bytes.
|
|
2841 cur->note_end_of_marking();
|
|
2842 // _prev_top_at_mark_start == top(),
|
|
2843 // _prev_marked_bytes == prev_marked_bytes
|
|
2844 }
|
|
2845 // If there is no mark in progress, we modified the _next variables
|
|
2846 // above needlessly, but harmlessly.
|
|
2847 if (_g1h->mark_in_progress()) {
|
|
2848 cur->note_start_of_marking(false);
|
|
2849 // _next_top_at_mark_start == top, _next_marked_bytes == 0
|
|
2850 // _next_marked_bytes == next_marked_bytes.
|
|
2851 }
|
|
2852
|
|
2853 // Now make sure the region has the right index in the sorted array.
|
|
2854 g1_policy()->note_change_in_marked_bytes(cur);
|
|
2855 }
|
|
2856 cur = cur->next_in_collection_set();
|
|
2857 }
|
|
2858 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
|
|
2859
|
|
2860 // Now restore saved marks, if any.
|
|
2861 if (_objs_with_preserved_marks != NULL) {
|
|
2862 assert(_preserved_marks_of_objs != NULL, "Both or none.");
|
|
2863 assert(_objs_with_preserved_marks->length() ==
|
|
2864 _preserved_marks_of_objs->length(), "Both or none.");
|
|
2865 guarantee(_objs_with_preserved_marks->length() ==
|
|
2866 _preserved_marks_of_objs->length(), "Both or none.");
|
|
2867 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
|
|
2868 oop obj = _objs_with_preserved_marks->at(i);
|
|
2869 markOop m = _preserved_marks_of_objs->at(i);
|
|
2870 obj->set_mark(m);
|
|
2871 }
|
|
2872 // Delete the preserved marks growable arrays (allocated on the C heap).
|
|
2873 delete _objs_with_preserved_marks;
|
|
2874 delete _preserved_marks_of_objs;
|
|
2875 _objs_with_preserved_marks = NULL;
|
|
2876 _preserved_marks_of_objs = NULL;
|
|
2877 }
|
|
2878 }
|
|
2879
|
|
2880 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
|
|
2881 _evac_failure_scan_stack->push(obj);
|
|
2882 }
|
|
2883
|
|
2884 void G1CollectedHeap::drain_evac_failure_scan_stack() {
|
|
2885 assert(_evac_failure_scan_stack != NULL, "precondition");
|
|
2886
|
|
2887 while (_evac_failure_scan_stack->length() > 0) {
|
|
2888 oop obj = _evac_failure_scan_stack->pop();
|
|
2889 _evac_failure_closure->set_region(heap_region_containing(obj));
|
|
2890 obj->oop_iterate_backwards(_evac_failure_closure);
|
|
2891 }
|
|
2892 }
|
|
2893
|
|
2894 void G1CollectedHeap::handle_evacuation_failure(oop old) {
|
|
2895 markOop m = old->mark();
|
|
2896 // forward to self
|
|
2897 assert(!old->is_forwarded(), "precondition");
|
|
2898
|
|
2899 old->forward_to(old);
|
|
2900 handle_evacuation_failure_common(old, m);
|
|
2901 }
|
|
2902
|
|
2903 oop
|
|
2904 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
|
|
2905 oop old) {
|
|
2906 markOop m = old->mark();
|
|
2907 oop forward_ptr = old->forward_to_atomic(old);
|
|
2908 if (forward_ptr == NULL) {
|
|
2909 // Forward-to-self succeeded.
|
|
2910 if (_evac_failure_closure != cl) {
|
|
2911 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
|
|
2912 assert(!_drain_in_progress,
|
|
2913 "Should only be true while someone holds the lock.");
|
|
2914 // Set the global evac-failure closure to the current thread's.
|
|
2915 assert(_evac_failure_closure == NULL, "Or locking has failed.");
|
|
2916 set_evac_failure_closure(cl);
|
|
2917 // Now do the common part.
|
|
2918 handle_evacuation_failure_common(old, m);
|
|
2919 // Reset to NULL.
|
|
2920 set_evac_failure_closure(NULL);
|
|
2921 } else {
|
|
2922 // The lock is already held, and this is recursive.
|
|
2923 assert(_drain_in_progress, "This should only be the recursive case.");
|
|
2924 handle_evacuation_failure_common(old, m);
|
|
2925 }
|
|
2926 return old;
|
|
2927 } else {
|
|
2928 // Someone else had a place to copy it.
|
|
2929 return forward_ptr;
|
|
2930 }
|
|
2931 }
|
|
2932
|
|
2933 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
|
|
2934 set_evacuation_failed(true);
|
|
2935
|
|
2936 preserve_mark_if_necessary(old, m);
|
|
2937
|
|
2938 HeapRegion* r = heap_region_containing(old);
|
|
2939 if (!r->evacuation_failed()) {
|
|
2940 r->set_evacuation_failed(true);
|
|
2941 if (G1TraceRegions) {
|
|
2942 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
|
|
2943 "["PTR_FORMAT","PTR_FORMAT")\n",
|
|
2944 r, r->bottom(), r->end());
|
|
2945 }
|
|
2946 }
|
|
2947
|
|
2948 push_on_evac_failure_scan_stack(old);
|
|
2949
|
|
2950 if (!_drain_in_progress) {
|
|
2951 // prevent recursion in copy_to_survivor_space()
|
|
2952 _drain_in_progress = true;
|
|
2953 drain_evac_failure_scan_stack();
|
|
2954 _drain_in_progress = false;
|
|
2955 }
|
|
2956 }
|
|
2957
|
|
2958 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
|
|
2959 if (m != markOopDesc::prototype()) {
|
|
2960 if (_objs_with_preserved_marks == NULL) {
|
|
2961 assert(_preserved_marks_of_objs == NULL, "Both or none.");
|
|
2962 _objs_with_preserved_marks =
|
|
2963 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
|
|
2964 _preserved_marks_of_objs =
|
|
2965 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
|
|
2966 }
|
|
2967 _objs_with_preserved_marks->push(obj);
|
|
2968 _preserved_marks_of_objs->push(m);
|
|
2969 }
|
|
2970 }
|
|
2971
|
|
2972 // *** Parallel G1 Evacuation
|
|
2973
|
|
2974 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
|
|
2975 size_t word_size) {
|
|
2976 HeapRegion* alloc_region = _gc_alloc_regions[purpose];
|
|
2977 // let the caller handle alloc failure
|
|
2978 if (alloc_region == NULL) return NULL;
|
|
2979
|
|
2980 HeapWord* block = alloc_region->par_allocate(word_size);
|
|
2981 if (block == NULL) {
|
|
2982 MutexLockerEx x(par_alloc_during_gc_lock(),
|
|
2983 Mutex::_no_safepoint_check_flag);
|
|
2984 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
|
|
2985 }
|
|
2986 return block;
|
|
2987 }
|
|
2988
|
|
2989 HeapWord*
|
|
2990 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
|
|
2991 HeapRegion* alloc_region,
|
|
2992 bool par,
|
|
2993 size_t word_size) {
|
|
2994 HeapWord* block = NULL;
|
|
2995 // In the parallel case, a previous thread to obtain the lock may have
|
|
2996 // already assigned a new gc_alloc_region.
|
|
2997 if (alloc_region != _gc_alloc_regions[purpose]) {
|
|
2998 assert(par, "But should only happen in parallel case.");
|
|
2999 alloc_region = _gc_alloc_regions[purpose];
|
|
3000 if (alloc_region == NULL) return NULL;
|
|
3001 block = alloc_region->par_allocate(word_size);
|
|
3002 if (block != NULL) return block;
|
|
3003 // Otherwise, continue; this new region is empty, too.
|
|
3004 }
|
|
3005 assert(alloc_region != NULL, "We better have an allocation region");
|
|
3006 // Another thread might have obtained alloc_region for the given
|
|
3007 // purpose, and might be attempting to allocate in it, and might
|
|
3008 // succeed. Therefore, we can't do the "finalization" stuff on the
|
|
3009 // region below until we're sure the last allocation has happened.
|
|
3010 // We ensure this by allocating the remaining space with a garbage
|
|
3011 // object.
|
|
3012 if (par) par_allocate_remaining_space(alloc_region);
|
|
3013 // Now we can do the post-GC stuff on the region.
|
|
3014 alloc_region->note_end_of_copying();
|
|
3015 g1_policy()->record_after_bytes(alloc_region->used());
|
|
3016
|
|
3017 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
|
|
3018 // Cannot allocate more regions for the given purpose.
|
|
3019 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
|
|
3020 // Is there an alternative?
|
|
3021 if (purpose != alt_purpose) {
|
|
3022 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
|
|
3023 // Has the alternative region not already been aliased?
|
|
3024 if (alloc_region != alt_region) {
|
|
3025 // Try to allocate in the alternative region.
|
|
3026 if (par) {
|
|
3027 block = alt_region->par_allocate(word_size);
|
|
3028 } else {
|
|
3029 block = alt_region->allocate(word_size);
|
|
3030 }
|
|
3031 // Make an alias.
|
|
3032 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
|
|
3033 }
|
|
3034 if (block != NULL) {
|
|
3035 return block;
|
|
3036 }
|
|
3037 // Both the allocation region and the alternative one are full
|
|
3038 // and aliased; replace them with a new allocation region.
|
|
3039 purpose = alt_purpose;
|
|
3040 } else {
|
|
3041 set_gc_alloc_region(purpose, NULL);
|
|
3042 return NULL;
|
|
3043 }
|
|
3044 }
|
|
3045
|
|
3046 // Now allocate a new region for allocation.
|
|
3047 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
|
|
3048
|
|
3049 // let the caller handle alloc failure
|
|
3050 if (alloc_region != NULL) {
|
|
3051
|
|
3052 assert(check_gc_alloc_regions(), "alloc regions messed up");
|
|
3053 assert(alloc_region->saved_mark_at_top(),
|
|
3054 "Mark should have been saved already.");
|
|
3055 // We used to assert that the region was zero-filled here, but no
|
|
3056 // longer.
|
|
3057
|
|
3058 // This must be done last: once it's installed, other threads may
|
|
3059 // allocate in it (without holding the lock).
|
|
3060 set_gc_alloc_region(purpose, alloc_region);
|
|
3061
|
|
3062 if (par) {
|
|
3063 block = alloc_region->par_allocate(word_size);
|
|
3064 } else {
|
|
3065 block = alloc_region->allocate(word_size);
|
|
3066 }
|
|
3067 // Caller handles alloc failure.
|
|
3068 } else {
|
|
3069 // This also sets to NULL any aliases that use the same old alloc region.
|
|
3070 set_gc_alloc_region(purpose, NULL);
|
|
3071 }
|
|
3072 return block; // May be NULL.
|
|
3073 }
|
|
3074
|
|
3075 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
|
|
3076 HeapWord* block = NULL;
|
|
3077 size_t free_words;
|
|
3078 do {
|
|
3079 free_words = r->free()/HeapWordSize;
|
|
3080 // If there's too little space, no one can allocate, so we're done.
|
|
3081 if (free_words < (size_t)oopDesc::header_size()) return;
|
|
3082 // Otherwise, try to claim it.
|
|
3083 block = r->par_allocate(free_words);
|
|
3084 } while (block == NULL);
|
|
3085 SharedHeap::fill_region_with_object(MemRegion(block, free_words));
|
|
3086 }
|
|
3087
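// NOTE: par_allocate_remaining_space() above keeps retrying par_allocate()
// for whatever space is left, so racing allocators either get their block or
// find the region full; the claimed tail is then overwritten with a filler
// object so the region stays parseable and the caller can safely treat the
// region as fully allocated before doing the end-of-copying bookkeeping.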
|
|
3088 #define use_local_bitmaps 1
|
|
3089 #define verify_local_bitmaps 0
|
|
3090
|
|
3091 #ifndef PRODUCT
|
|
3092
|
|
3093 class GCLabBitMap;
|
|
3094 class GCLabBitMapClosure: public BitMapClosure {
|
|
3095 private:
|
|
3096 ConcurrentMark* _cm;
|
|
3097 GCLabBitMap* _bitmap;
|
|
3098
|
|
3099 public:
|
|
3100 GCLabBitMapClosure(ConcurrentMark* cm,
|
|
3101 GCLabBitMap* bitmap) {
|
|
3102 _cm = cm;
|
|
3103 _bitmap = bitmap;
|
|
3104 }
|
|
3105
|
|
3106 virtual bool do_bit(size_t offset);
|
|
3107 };
|
|
3108
|
|
3109 #endif // PRODUCT
|
|
3110
|
|
3111 #define oop_buffer_length 256
|
|
3112
|
|
3113 class GCLabBitMap: public BitMap {
|
|
3114 private:
|
|
3115 ConcurrentMark* _cm;
|
|
3116
|
|
3117 int _shifter;
|
|
3118 size_t _bitmap_word_covers_words;
|
|
3119
|
|
3120 // beginning of the heap
|
|
3121 HeapWord* _heap_start;
|
|
3122
|
|
3123 // this is the actual start of the GCLab
|
|
3124 HeapWord* _real_start_word;
|
|
3125
|
|
3126 // this is the actual end of the GCLab
|
|
3127 HeapWord* _real_end_word;
|
|
3128
|
|
3129 // this is the first word, possibly located before the actual start
|
|
3130 // of the GCLab, that corresponds to the first bit of the bitmap
|
|
3131 HeapWord* _start_word;
|
|
3132
|
|
3133 // size of a GCLab in words
|
|
3134 size_t _gclab_word_size;
|
|
3135
|
|
3136 static int shifter() {
|
|
3137 return MinObjAlignment - 1;
|
|
3138 }
|
|
3139
|
|
3140 // how many heap words does a single bitmap word correspond to?
|
|
3141 static size_t bitmap_word_covers_words() {
|
|
3142 return BitsPerWord << shifter();
|
|
3143 }
|
|
3144
|
|
3145 static size_t gclab_word_size() {
|
|
3146 return ParallelGCG1AllocBufferSize / HeapWordSize;
|
|
3147 }
|
|
3148
|
|
3149 static size_t bitmap_size_in_bits() {
|
|
3150 size_t bits_in_bitmap = gclab_word_size() >> shifter();
|
|
3151 // We are going to ensure that the beginning of a word in this
|
|
3152 // bitmap also corresponds to the beginning of a word in the
|
|
3153 // global marking bitmap. To handle the case where a GCLab
|
|
3154 // starts from the middle of the bitmap, we need to add enough
|
|
3155 // space (i.e. up to a bitmap word) to ensure that we have
|
|
3156 // enough bits in the bitmap.
|
|
3157 return bits_in_bitmap + BitsPerWord - 1;
|
|
3158 }
|
|
3159 public:
|
|
3160 GCLabBitMap(HeapWord* heap_start)
|
|
3161 : BitMap(bitmap_size_in_bits()),
|
|
3162 _cm(G1CollectedHeap::heap()->concurrent_mark()),
|
|
3163 _shifter(shifter()),
|
|
3164 _bitmap_word_covers_words(bitmap_word_covers_words()),
|
|
3165 _heap_start(heap_start),
|
|
3166 _gclab_word_size(gclab_word_size()),
|
|
3167 _real_start_word(NULL),
|
|
3168 _real_end_word(NULL),
|
|
3169 _start_word(NULL)
|
|
3170 {
|
|
3171 guarantee( size_in_words() >= bitmap_size_in_words(),
|
|
3172 "just making sure");
|
|
3173 }
|
|
3174
|
|
3175 inline unsigned heapWordToOffset(HeapWord* addr) {
|
|
3176 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
|
|
3177 assert(offset < size(), "offset should be within bounds");
|
|
3178 return offset;
|
|
3179 }
|
|
3180
|
|
3181 inline HeapWord* offsetToHeapWord(size_t offset) {
|
|
3182 HeapWord* addr = _start_word + (offset << _shifter);
|
|
3183 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
|
|
3184 return addr;
|
|
3185 }
|
|
3186
|
|
3187 bool fields_well_formed() {
|
|
3188 bool ret1 = (_real_start_word == NULL) &&
|
|
3189 (_real_end_word == NULL) &&
|
|
3190 (_start_word == NULL);
|
|
3191 if (ret1)
|
|
3192 return true;
|
|
3193
|
|
3194 bool ret2 = _real_start_word >= _start_word &&
|
|
3195 _start_word < _real_end_word &&
|
|
3196 (_real_start_word + _gclab_word_size) == _real_end_word &&
|
|
3197 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
|
|
3198 > _real_end_word;
|
|
3199 return ret2;
|
|
3200 }
|
|
3201
|
|
3202 inline bool mark(HeapWord* addr) {
|
|
3203 guarantee(use_local_bitmaps, "invariant");
|
|
3204 assert(fields_well_formed(), "invariant");
|
|
3205
|
|
3206 if (addr >= _real_start_word && addr < _real_end_word) {
|
|
3207 assert(!isMarked(addr), "should not have already been marked");
|
|
3208
|
|
3209 // first mark it on the bitmap
|
|
3210 at_put(heapWordToOffset(addr), true);
|
|
3211
|
|
3212 return true;
|
|
3213 } else {
|
|
3214 return false;
|
|
3215 }
|
|
3216 }
|
|
3217
|
|
3218 inline bool isMarked(HeapWord* addr) {
|
|
3219 guarantee(use_local_bitmaps, "invariant");
|
|
3220 assert(fields_well_formed(), "invariant");
|
|
3221
|
|
3222 return at(heapWordToOffset(addr));
|
|
3223 }
|
|
3224
|
|
3225 void set_buffer(HeapWord* start) {
|
|
3226 guarantee(use_local_bitmaps, "invariant");
|
|
3227 clear();
|
|
3228
|
|
3229 assert(start != NULL, "invariant");
|
|
3230 _real_start_word = start;
|
|
3231 _real_end_word = start + _gclab_word_size;
|
|
3232
|
|
3233 size_t diff =
|
|
3234 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
|
|
3235 _start_word = start - diff;
|
|
3236
|
|
3237 assert(fields_well_formed(), "invariant");
|
|
3238 }
|
|
3239
|
|
3240 #ifndef PRODUCT
|
|
3241 void verify() {
|
|
3242 // verify that the marks have been propagated
|
|
3243 GCLabBitMapClosure cl(_cm, this);
|
|
3244 iterate(&cl);
|
|
3245 }
|
|
3246 #endif // PRODUCT
|
|
3247
|
|
3248 void retire() {
|
|
3249 guarantee(use_local_bitmaps, "invariant");
|
|
3250 assert(fields_well_formed(), "invariant");
|
|
3251
|
|
3252 if (_start_word != NULL) {
|
|
3253 CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
|
|
3254
|
|
3255 // this means that the bitmap was set up for the GCLab
|
|
3256 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
|
|
3257
|
|
3258 mark_bitmap->mostly_disjoint_range_union(this,
|
|
3259 0, // always start from the start of the bitmap
|
|
3260 _start_word,
|
|
3261 size_in_words());
|
|
3262 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
|
|
3263
|
|
3264 #ifndef PRODUCT
|
|
3265 if (use_local_bitmaps && verify_local_bitmaps)
|
|
3266 verify();
|
|
3267 #endif // PRODUCT
|
|
3268 } else {
|
|
3269 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
|
|
3270 }
|
|
3271 }
|
|
3272
|
|
3273 static size_t bitmap_size_in_words() {
|
|
3274 return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
|
|
3275 }
|
|
3276 };
|
|
3277
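// NOTE: GCLabBitMap lets a worker record "next" marks for objects copied into
// its own GCLab without contending on the shared marking bitmap; retire()
// then folds the whole local bitmap into the global one with a single
// mostly_disjoint_range_union() call. _start_word is rounded down from the
// GCLab's real start so that bit 0 of the local bitmap lines up with a
// bitmap-word boundary of the global bitmap, which is what keeps that union
// cheap. The mapping used above is:
//   bit offset = (addr - _start_word) >> _shifter     (heapWordToOffset)
//   addr       = _start_word + (offset << _shifter)   (offsetToHeapWord)
// and it round-trips exactly because GCLab allocations are aligned to
// MinObjAlignment.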
|
|
3278 #ifndef PRODUCT
|
|
3279
|
|
3280 bool GCLabBitMapClosure::do_bit(size_t offset) {
|
|
3281 HeapWord* addr = _bitmap->offsetToHeapWord(offset);
|
|
3282 guarantee(_cm->isMarked(oop(addr)), "it should be!");
|
|
3283 return true;
|
|
3284 }
|
|
3285
|
|
3286 #endif // PRODUCT
|
|
3287
|
|
3288 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
|
|
3289 private:
|
|
3290 bool _retired;
|
|
3291 bool _during_marking;
|
|
3292 GCLabBitMap _bitmap;
|
|
3293
|
|
3294 public:
|
|
3295 G1ParGCAllocBuffer() :
|
|
3296 ParGCAllocBuffer(ParallelGCG1AllocBufferSize / HeapWordSize),
|
|
3297 _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
|
|
3298 _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
|
|
3299 _retired(false)
|
|
3300 { }
|
|
3301
|
|
3302 inline bool mark(HeapWord* addr) {
|
|
3303 guarantee(use_local_bitmaps, "invariant");
|
|
3304 assert(_during_marking, "invariant");
|
|
3305 return _bitmap.mark(addr);
|
|
3306 }
|
|
3307
|
|
3308 inline void set_buf(HeapWord* buf) {
|
|
3309 if (use_local_bitmaps && _during_marking)
|
|
3310 _bitmap.set_buffer(buf);
|
|
3311 ParGCAllocBuffer::set_buf(buf);
|
|
3312 _retired = false;
|
|
3313 }
|
|
3314
|
|
3315 inline void retire(bool end_of_gc, bool retain) {
|
|
3316 if (_retired)
|
|
3317 return;
|
|
3318 if (use_local_bitmaps && _during_marking) {
|
|
3319 _bitmap.retire();
|
|
3320 }
|
|
3321 ParGCAllocBuffer::retire(end_of_gc, retain);
|
|
3322 _retired = true;
|
|
3323 }
|
|
3324 };
|
|
3325
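// NOTE: G1ParGCAllocBuffer couples the generic ParGCAllocBuffer retirement
// (filling the unused tail, updating statistics) with flushing the local mark
// bitmap: retire() must push the GCLab's marks into the global "next" bitmap
// before the buffer is recycled, otherwise marks made via mark() above would
// be lost. The _retired flag guards against doing this twice for one buffer.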
|
|
3326
|
|
3327 class G1ParScanThreadState : public StackObj {
|
|
3328 protected:
|
|
3329 G1CollectedHeap* _g1h;
|
|
3330 RefToScanQueue* _refs;
|
|
3331
|
|
3332 typedef GrowableArray<oop*> OverflowQueue;
|
|
3333 OverflowQueue* _overflowed_refs;
|
|
3334
|
|
3335 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
|
|
3336
|
|
3337 size_t _alloc_buffer_waste;
|
|
3338 size_t _undo_waste;
|
|
3339
|
|
3340 OopsInHeapRegionClosure* _evac_failure_cl;
|
|
3341 G1ParScanHeapEvacClosure* _evac_cl;
|
|
3342 G1ParScanPartialArrayClosure* _partial_scan_cl;
|
|
3343
|
|
3344 int _hash_seed;
|
|
3345 int _queue_num;
|
|
3346
|
|
3347 int _term_attempts;
|
|
3348 #if G1_DETAILED_STATS
|
|
3349 int _pushes, _pops, _steals, _steal_attempts;
|
|
3350 int _overflow_pushes;
|
|
3351 #endif
|
|
3352
|
|
3353 double _start;
|
|
3354 double _start_strong_roots;
|
|
3355 double _strong_roots_time;
|
|
3356 double _start_term;
|
|
3357 double _term_time;
|
|
3358
|
|
3359 // Map from young-age-index (0 == not young, 1 is youngest) to
|
|
3360 // surviving words. base is what we get back from the malloc call
|
|
3361 size_t* _surviving_young_words_base;
|
|
3362 // this points into the array, as we use the first few entries for padding
|
|
3363 size_t* _surviving_young_words;
|
|
3364
|
|
3365 #define PADDING_ELEM_NUM (64 / sizeof(size_t))
|
|
3366
|
|
3367 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
|
|
3368
|
|
3369 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
|
|
3370
|
|
3371 public:
|
|
3372 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
|
|
3373 : _g1h(g1h),
|
|
3374 _refs(g1h->task_queue(queue_num)),
|
|
3375 _hash_seed(17), _queue_num(queue_num),
|
|
3376 _term_attempts(0),
|
|
3377 #if G1_DETAILED_STATS
|
|
3378 _pushes(0), _pops(0), _steals(0),
|
|
3379 _steal_attempts(0), _overflow_pushes(0),
|
|
3380 #endif
|
|
3381 _strong_roots_time(0), _term_time(0),
|
|
3382 _alloc_buffer_waste(0), _undo_waste(0)
|
|
3383 {
|
|
3384 // we allocate young_cset_length() plus one entries, since
|
|
3385 // we "sacrifice" entry 0 to keep track of surviving bytes for
|
|
3386 // non-young regions (where the age is -1)
|
|
3387 // We also add a few elements at the beginning and at the end in
|
|
3388 // an attempt to eliminate cache contention
|
|
3389 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
|
|
3390 size_t array_length = PADDING_ELEM_NUM +
|
|
3391 real_length +
|
|
3392 PADDING_ELEM_NUM;
|
|
3393 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
|
|
3394 if (_surviving_young_words_base == NULL)
|
|
3395 vm_exit_out_of_memory(array_length * sizeof(size_t),
|
|
3396 "Not enough space for young surv histo.");
|
|
3397 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
|
|
3398 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
|
|
3399
|
|
3400 _overflowed_refs = new OverflowQueue(10);
|
|
3401
|
|
3402 _start = os::elapsedTime();
|
|
3403 }
|
|
3404
|
|
3405 ~G1ParScanThreadState() {
|
|
3406 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
|
|
3407 }
|
|
3408
|
|
3409 RefToScanQueue* refs() { return _refs; }
|
|
3410 OverflowQueue* overflowed_refs() { return _overflowed_refs; }
|
|
3411
|
|
3412 inline G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
|
|
3413 return &_alloc_buffers[purpose];
|
|
3414 }
|
|
3415
|
|
3416 size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
|
|
3417 size_t undo_waste() { return _undo_waste; }
|
|
3418
|
|
3419 void push_on_queue(oop* ref) {
|
|
3420 if (!refs()->push(ref)) {
|
|
3421 overflowed_refs()->push(ref);
|
|
3422 IF_G1_DETAILED_STATS(note_overflow_push());
|
|
3423 } else {
|
|
3424 IF_G1_DETAILED_STATS(note_push());
|
|
3425 }
|
|
3426 }
|
|
3427
|
|
3428 void pop_from_queue(oop*& ref) {
|
|
3429 if (!refs()->pop_local(ref)) {
|
|
3430 ref = NULL;
|
|
3431 } else {
|
|
3432 IF_G1_DETAILED_STATS(note_pop());
|
|
3433 }
|
|
3434 }
|
|
3435
|
|
3436 void pop_from_overflow_queue(oop*& ref) {
|
|
3437 ref = overflowed_refs()->pop();
|
|
3438 }
|
|
3439
|
|
3440 int refs_to_scan() { return refs()->size(); }
|
|
3441 int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
|
|
3442
|
|
3443 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
|
|
3444
|
|
3445 HeapWord* obj = NULL;
|
|
3446 if (word_sz * 100 <
|
|
3447 (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) *
|
|
3448 ParallelGCBufferWastePct) {
|
|
3449 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
|
|
3450 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
|
|
3451 alloc_buf->retire(false, false);
|
|
3452
|
|
3453 HeapWord* buf =
|
|
3454 _g1h->par_allocate_during_gc(purpose, ParallelGCG1AllocBufferSize / HeapWordSize);
|
|
3455 if (buf == NULL) return NULL; // Let caller handle allocation failure.
|
|
3456 // Otherwise.
|
|
3457 alloc_buf->set_buf(buf);
|
|
3458
|
|
3459 obj = alloc_buf->allocate(word_sz);
|
|
3460 assert(obj != NULL, "buffer was definitely big enough...");
|
|
3461 }
|
|
3462 else {
|
|
3463 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
|
|
3464 }
|
|
3465 return obj;
|
|
3466 }
|
|
3467
|
|
3468 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
|
|
3469 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
|
|
3470 if (obj != NULL) return obj;
|
|
3471 return allocate_slow(purpose, word_sz);
|
|
3472 }
|
|
3473
|
|
3474 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
|
|
3475 if (alloc_buffer(purpose)->contains(obj)) {
|
|
3476 guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
|
|
3477 "should contain whole object");
|
|
3478 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
|
|
3479 }
|
|
3480 else {
|
|
3481 SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
|
|
3482 add_to_undo_waste(word_sz);
|
|
3483 }
|
|
3484 }
|
|
3485
|
|
3486 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
|
|
3487 _evac_failure_cl = evac_failure_cl;
|
|
3488 }
|
|
3489 OopsInHeapRegionClosure* evac_failure_closure() {
|
|
3490 return _evac_failure_cl;
|
|
3491 }
|
|
3492
|
|
3493 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
|
|
3494 _evac_cl = evac_cl;
|
|
3495 }
|
|
3496
|
|
3497 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
|
|
3498 _partial_scan_cl = partial_scan_cl;
|
|
3499 }
|
|
3500
|
|
3501 int* hash_seed() { return &_hash_seed; }
|
|
3502 int queue_num() { return _queue_num; }
|
|
3503
|
|
3504 int term_attempts() { return _term_attempts; }
|
|
3505 void note_term_attempt() { _term_attempts++; }
|
|
3506
|
|
3507 #if G1_DETAILED_STATS
|
|
3508 int pushes() { return _pushes; }
|
|
3509 int pops() { return _pops; }
|
|
3510 int steals() { return _steals; }
|
|
3511 int steal_attempts() { return _steal_attempts; }
|
|
3512 int overflow_pushes() { return _overflow_pushes; }
|
|
3513
|
|
3514 void note_push() { _pushes++; }
|
|
3515 void note_pop() { _pops++; }
|
|
3516 void note_steal() { _steals++; }
|
|
3517 void note_steal_attempt() { _steal_attempts++; }
|
|
3518 void note_overflow_push() { _overflow_pushes++; }
|
|
3519 #endif
|
|
3520
|
|
3521 void start_strong_roots() {
|
|
3522 _start_strong_roots = os::elapsedTime();
|
|
3523 }
|
|
3524 void end_strong_roots() {
|
|
3525 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
|
|
3526 }
|
|
3527 double strong_roots_time() { return _strong_roots_time; }
|
|
3528
|
|
3529 void start_term_time() {
|
|
3530 note_term_attempt();
|
|
3531 _start_term = os::elapsedTime();
|
|
3532 }
|
|
3533 void end_term_time() {
|
|
3534 _term_time += (os::elapsedTime() - _start_term);
|
|
3535 }
|
|
3536 double term_time() { return _term_time; }
|
|
3537
|
|
3538 double elapsed() {
|
|
3539 return os::elapsedTime() - _start;
|
|
3540 }
|
|
3541
|
|
3542 size_t* surviving_young_words() {
|
|
3543 // We add on to hide entry 0 which accumulates surviving words for
|
|
3544 // age -1 regions (i.e. non-young ones)
|
|
3545 return _surviving_young_words;
|
|
3546 }
|
|
3547
|
|
3548 void retire_alloc_buffers() {
|
|
3549 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
|
3550 size_t waste = _alloc_buffers[ap].words_remaining();
|
|
3551 add_to_alloc_buffer_waste(waste);
|
|
3552 _alloc_buffers[ap].retire(true, false);
|
|
3553 }
|
|
3554 }
|
|
3555
|
|
3556 void trim_queue() {
|
|
3557 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
|
|
3558 oop *ref_to_scan = NULL;
|
|
3559 if (overflowed_refs_to_scan() == 0) {
|
|
3560 pop_from_queue(ref_to_scan);
|
|
3561 } else {
|
|
3562 pop_from_overflow_queue(ref_to_scan);
|
|
3563 }
|
|
3564 if (ref_to_scan != NULL) {
|
|
3565 if ((intptr_t)ref_to_scan & G1_PARTIAL_ARRAY_MASK) {
|
|
3566 _partial_scan_cl->do_oop_nv(ref_to_scan);
|
|
3567 } else {
|
|
3568 // Note: we can use "raw" versions of "region_containing" because
|
|
3569 // "obj_to_scan" is definitely in the heap, and is not in a
|
|
3570 // humongous region.
|
|
3571 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
|
|
3572 _evac_cl->set_region(r);
|
|
3573 _evac_cl->do_oop_nv(ref_to_scan);
|
|
3574 }
|
|
3575 }
|
|
3576 }
|
|
3577 }
|
|
3578 };
|
|
3579
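// NOTE: one G1ParScanThreadState exists per GC worker for the duration of a
// pause. It owns the worker's task queue (plus a growable overflow queue for
// when a push on the bounded queue fails), one allocation buffer per
// GCAllocPurpose, the per-age surviving-words histogram, and the timing
// counters reported to G1CollectorPolicy. G1ParTask::work() below shows the
// full lifecycle: construct, install the closures, scan roots, drain and
// steal, then retire_alloc_buffers().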
|
|
3580
|
|
3581 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
|
|
3582 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
|
|
3583 _par_scan_state(par_scan_state) { }
|
|
3584
|
|
3585 // This closure is applied to the fields of the objects that have just been copied.
|
|
3586 // Should probably be made inline and moved in g1OopClosures.inline.hpp.
|
|
3587 void G1ParScanClosure::do_oop_nv(oop* p) {
|
|
3588 oop obj = *p;
|
|
3589 if (obj != NULL) {
|
|
3590 if (_g1->obj_in_cs(obj)) {
|
|
3591 if (obj->is_forwarded()) {
|
|
3592 *p = obj->forwardee();
|
|
3593 } else {
|
|
3594 _par_scan_state->push_on_queue(p);
|
|
3595 return;
|
|
3596 }
|
|
3597 }
|
|
3598 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
|
|
3599 }
|
|
3600 }
|
|
3601
|
|
3602 void G1ParCopyHelper::mark_forwardee(oop* p) {
|
|
3603 // This is called _after_ do_oop_work has been called, hence after
|
|
3604 // the object has been relocated to its new location and *p points
|
|
3605 // to its new location.
|
|
3606
|
|
3607 oop thisOop = *p;
|
|
3608 if (thisOop != NULL) {
|
|
3609 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)),
|
|
3610 "shouldn't still be in the CSet if evacuation didn't fail.");
|
|
3611 HeapWord* addr = (HeapWord*)thisOop;
|
|
3612 if (_g1->is_in_g1_reserved(addr))
|
|
3613 _cm->grayRoot(oop(addr));
|
|
3614 }
|
|
3615 }
|
|
3616
|
|
3617 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
|
|
3618 size_t word_sz = old->size();
|
|
3619 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
|
|
3620 // +1 to make the -1 indexes valid...
|
|
3621 int young_index = from_region->young_index_in_cset()+1;
|
|
3622 assert( (from_region->is_young() && young_index > 0) ||
|
|
3623 (!from_region->is_young() && young_index == 0), "invariant" );
|
|
3624 G1CollectorPolicy* g1p = _g1->g1_policy();
|
|
3625 markOop m = old->mark();
|
|
3626 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, m->age(),
|
|
3627 word_sz);
|
|
3628 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
|
|
3629 oop obj = oop(obj_ptr);
|
|
3630
|
|
3631 if (obj_ptr == NULL) {
|
|
3632 // This will either forward-to-self, or detect that someone else has
|
|
3633 // installed a forwarding pointer.
|
|
3634 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
|
|
3635 return _g1->handle_evacuation_failure_par(cl, old);
|
|
3636 }
|
|
3637
|
|
3638 oop forward_ptr = old->forward_to_atomic(obj);
|
|
3639 if (forward_ptr == NULL) {
|
|
3640 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
|
|
3641 obj->set_mark(m);
|
|
3642 if (g1p->track_object_age(alloc_purpose)) {
|
|
3643 obj->incr_age();
|
|
3644 }
|
|
3645 // preserve "next" mark bit
|
|
3646 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
|
|
3647 if (!use_local_bitmaps ||
|
|
3648 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
|
|
3649 // if we couldn't mark it on the local bitmap (this happens when
|
|
3650 // the object was not allocated in the GCLab), we have to bite
|
|
3651 // the bullet and do the standard parallel mark
|
|
3652 _cm->markAndGrayObjectIfNecessary(obj);
|
|
3653 }
|
|
3654 #if 1
|
|
3655 if (_g1->isMarkedNext(old)) {
|
|
3656 _cm->nextMarkBitMap()->parClear((HeapWord*)old);
|
|
3657 }
|
|
3658 #endif
|
|
3659 }
|
|
3660
|
|
3661 size_t* surv_young_words = _par_scan_state->surviving_young_words();
|
|
3662 surv_young_words[young_index] += word_sz;
|
|
3663
|
|
3664 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
|
|
3665 arrayOop(old)->set_length(0);
|
|
3666 _par_scan_state->push_on_queue((oop*) ((intptr_t)old | G1_PARTIAL_ARRAY_MASK));
|
|
3667 } else {
|
|
3668 _scanner->set_region(_g1->heap_region_containing(obj));
|
|
3669 obj->oop_iterate_backwards(_scanner);
|
|
3670 }
|
|
3671 } else {
|
|
3672 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
|
|
3673 obj = forward_ptr;
|
|
3674 }
|
|
3675 return obj;
|
|
3676 }
|
|
3677
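// NOTE: copy_to_survivor_space() above uses speculative allocation: the
// destination block is claimed first, then forward_to_atomic() decides the
// race. The winner copies the object, restores the mark and age, propagates
// the "next" mark bit, and either scans the copy's fields immediately or, for
// large objArrays, queues a tagged "partial array" entry so the scan can be
// chunked and stolen. The loser hands the block back via undo_allocation()
// and returns the winner's forwardee.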
|
|
3678 template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
|
|
3679 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_forwardee>::do_oop_work(oop* p) {
|
|
3680 oop obj = *p;
|
|
3681 assert(barrier != G1BarrierRS || obj != NULL,
|
|
3682 "Precondition: G1BarrierRS implies obj is nonNull");
|
|
3683
|
|
3684 if (obj != NULL) {
|
|
3685 if (_g1->obj_in_cs(obj)) {
|
|
3686 #if G1_REM_SET_LOGGING
|
|
3687 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" into CS.",
|
|
3688 p, (void*) obj);
|
|
3689 #endif
|
|
3690 if (obj->is_forwarded()) {
|
|
3691 *p = obj->forwardee();
|
|
3692 } else {
|
|
3693 *p = copy_to_survivor_space(obj);
|
|
3694 }
|
|
3695 // When scanning the RS, we only care about objs in CS.
|
|
3696 if (barrier == G1BarrierRS) {
|
|
3697 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
|
|
3698 }
|
|
3699 }
|
|
3700 // When scanning moved objs, must look at all oops.
|
|
3701 if (barrier == G1BarrierEvac) {
|
|
3702 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
|
|
3703 }
|
|
3704
|
|
3705 if (do_gen_barrier) {
|
|
3706 par_do_barrier(p);
|
|
3707 }
|
|
3708 }
|
|
3709 }
|
|
3710
|
|
3711 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
|
|
3712
|
|
3713 template <class T> void G1ParScanPartialArrayClosure::process_array_chunk(
|
|
3714 oop obj, int start, int end) {
|
|
3715 // process our set of indices (include header in first chunk)
|
|
3716 assert(start < end, "invariant");
|
|
3717 T* const base = (T*)objArrayOop(obj)->base();
|
|
3718 T* const start_addr = base + start;
|
|
3719 T* const end_addr = base + end;
|
|
3720 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
|
|
3721 _scanner.set_region(_g1->heap_region_containing(obj));
|
|
3722 obj->oop_iterate(&_scanner, mr);
|
|
3723 }
|
|
3724
|
|
3725 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
|
|
3726 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
|
|
3727 oop old = oop((intptr_t)p & ~G1_PARTIAL_ARRAY_MASK);
|
|
3728 assert(old->is_objArray(), "must be obj array");
|
|
3729 assert(old->is_forwarded(), "must be forwarded");
|
|
3730 assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
|
|
3731
|
|
3732 objArrayOop obj = objArrayOop(old->forwardee());
|
|
3733 assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
|
|
3734 // Process ParGCArrayScanChunk elements now
|
|
3735 // and push the remainder back onto queue
|
|
3736 int start = arrayOop(old)->length();
|
|
3737 int end = obj->length();
|
|
3738 int remainder = end - start;
|
|
3739 assert(start <= end, "just checking");
|
|
3740 if (remainder > 2 * ParGCArrayScanChunk) {
|
|
3741 // Test above combines last partial chunk with a full chunk
|
|
3742 end = start + ParGCArrayScanChunk;
|
|
3743 arrayOop(old)->set_length(end);
|
|
3744 // Push remainder.
|
|
3745 _par_scan_state->push_on_queue((oop*) ((intptr_t) old | G1_PARTIAL_ARRAY_MASK));
|
|
3746 } else {
|
|
3747 // Restore length so that the heap remains parsable in
|
|
3748 // case of evacuation failure.
|
|
3749 arrayOop(old)->set_length(end);
|
|
3750 }
|
|
3751
|
|
3752 // process our set of indices (include header in first chunk)
|
|
3753 process_array_chunk<oop>(obj, start, end);
|
|
3754 oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr<oop>(start);
|
|
3755 oop* end_addr = (oop*)(obj->base()) + end; // obj_at_addr(end) asserts end < length
|
|
3756 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
|
|
3757 _scanner.set_region(_g1->heap_region_containing(obj));
|
|
3758 obj->oop_iterate(&_scanner, mr);
|
|
3759 }
|
|
3760
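// NOTE: the partial-array scheme above reuses the *old* (already forwarded)
// array's length field as a scan cursor, and tags queue entries with
// G1_PARTIAL_ARRAY_MASK so trim_queue() can tell them apart. A purely
// illustrative walk-through, assuming ParGCArrayScanChunk == 50 and an array
// of length 175: copy_to_survivor_space() sets old->length to 0 and queues
// the tagged pointer; the first pop scans elements [0,50) and requeues with
// old->length set to 50; the second scans [50,100); the third sees a
// remainder of 75, which is not more than 2 * ParGCArrayScanChunk, so it
// restores old->length to 175 and scans [100,175) in one go, keeping the
// heap parseable even if evacuation fails.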
|
|
3761 int G1ScanAndBalanceClosure::_nq = 0;
|
|
3762
|
|
3763 class G1ParEvacuateFollowersClosure : public VoidClosure {
|
|
3764 protected:
|
|
3765 G1CollectedHeap* _g1h;
|
|
3766 G1ParScanThreadState* _par_scan_state;
|
|
3767 RefToScanQueueSet* _queues;
|
|
3768 ParallelTaskTerminator* _terminator;
|
|
3769
|
|
3770 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
|
|
3771 RefToScanQueueSet* queues() { return _queues; }
|
|
3772 ParallelTaskTerminator* terminator() { return _terminator; }
|
|
3773
|
|
3774 public:
|
|
3775 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
|
|
3776 G1ParScanThreadState* par_scan_state,
|
|
3777 RefToScanQueueSet* queues,
|
|
3778 ParallelTaskTerminator* terminator)
|
|
3779 : _g1h(g1h), _par_scan_state(par_scan_state),
|
|
3780 _queues(queues), _terminator(terminator) {}
|
|
3781
|
|
3782 void do_void() {
|
|
3783 G1ParScanThreadState* pss = par_scan_state();
|
|
3784 while (true) {
|
|
3785 oop* ref_to_scan;
|
|
3786 pss->trim_queue();
|
|
3787 IF_G1_DETAILED_STATS(pss->note_steal_attempt());
|
|
3788 if (queues()->steal(pss->queue_num(),
|
|
3789 pss->hash_seed(),
|
|
3790 ref_to_scan)) {
|
|
3791 IF_G1_DETAILED_STATS(pss->note_steal());
|
|
3792 pss->push_on_queue(ref_to_scan);
|
|
3793 continue;
|
|
3794 }
|
|
3795 pss->start_term_time();
|
|
3796 if (terminator()->offer_termination()) break;
|
|
3797 pss->end_term_time();
|
|
3798 }
|
|
3799 pss->end_term_time();
|
|
3800 pss->retire_alloc_buffers();
|
|
3801 }
|
|
3802 };
|
|
3803
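// NOTE: do_void() above is the standard work-stealing drain loop: exhaust the
// local queue, try to steal one entry from a randomly chosen victim
// (hash_seed() keeps victims spread out), and only offer termination when a
// steal attempt fails. offer_termination() should return true only once all
// workers have reached the same point, so no live entry is left on any queue
// when the copy phase of the pause ends.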
|
|
3804 class G1ParTask : public AbstractGangTask {
|
|
3805 protected:
|
|
3806 G1CollectedHeap* _g1h;
|
|
3807 RefToScanQueueSet *_queues;
|
|
3808 ParallelTaskTerminator _terminator;
|
|
3809
|
|
3810 Mutex _stats_lock;
|
|
3811 Mutex* stats_lock() { return &_stats_lock; }
|
|
3812
|
|
3813 size_t getNCards() {
|
|
3814 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
|
|
3815 / G1BlockOffsetSharedArray::N_bytes;
|
|
3816 }
|
|
3817
|
|
3818 public:
|
|
3819 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
|
|
3820 : AbstractGangTask("G1 collection"),
|
|
3821 _g1h(g1h),
|
|
3822 _queues(task_queues),
|
|
3823 _terminator(workers, _queues),
|
|
3824 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
|
|
3825 {}
|
|
3826
|
|
3827 RefToScanQueueSet* queues() { return _queues; }
|
|
3828
|
|
3829 RefToScanQueue *work_queue(int i) {
|
|
3830 return queues()->queue(i);
|
|
3831 }
|
|
3832
|
|
3833 void work(int i) {
|
|
3834 ResourceMark rm;
|
|
3835 HandleMark hm;
|
|
3836
|
|
3837 G1ParScanThreadState pss(_g1h, i);
|
|
3838 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
|
|
3839 G1ParScanHeapEvacClosure evac_failure_cl(_g1h, &pss);
|
|
3840 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
|
|
3841
|
|
3842 pss.set_evac_closure(&scan_evac_cl);
|
|
3843 pss.set_evac_failure_closure(&evac_failure_cl);
|
|
3844 pss.set_partial_scan_closure(&partial_scan_cl);
|
|
3845
|
|
3846 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss);
|
|
3847 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss);
|
|
3848 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss);
|
|
3849 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
|
|
3850 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss);
|
|
3851 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss);
|
|
3852
|
|
3853 OopsInHeapRegionClosure *scan_root_cl;
|
|
3854 OopsInHeapRegionClosure *scan_perm_cl;
|
|
3855 OopsInHeapRegionClosure *scan_so_cl;
|
|
3856
|
|
3857 if (_g1h->g1_policy()->should_initiate_conc_mark()) {
|
|
3858 scan_root_cl = &scan_mark_root_cl;
|
|
3859 scan_perm_cl = &scan_mark_perm_cl;
|
|
3860 scan_so_cl = &scan_mark_heap_rs_cl;
|
|
3861 } else {
|
|
3862 scan_root_cl = &only_scan_root_cl;
|
|
3863 scan_perm_cl = &only_scan_perm_cl;
|
|
3864 scan_so_cl = &only_scan_heap_rs_cl;
|
|
3865 }
|
|
3866
|
|
3867 pss.start_strong_roots();
|
|
3868 _g1h->g1_process_strong_roots(/* not collecting perm */ false,
|
|
3869 SharedHeap::SO_AllClasses,
|
|
3870 scan_root_cl,
|
|
3871 &only_scan_heap_rs_cl,
|
|
3872 scan_so_cl,
|
|
3873 scan_perm_cl,
|
|
3874 i);
|
|
3875 pss.end_strong_roots();
|
|
3876 {
|
|
3877 double start = os::elapsedTime();
|
|
3878 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
|
|
3879 evac.do_void();
|
|
3880 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
|
|
3881 double term_ms = pss.term_time()*1000.0;
|
|
3882 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
|
|
3883 _g1h->g1_policy()->record_termination_time(i, term_ms);
|
|
3884 }
|
|
3885 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
|
|
3886
|
|
3887 // Clean up any par-expanded rem sets.
|
|
3888 HeapRegionRemSet::par_cleanup();
|
|
3889
|
|
3890 MutexLocker x(stats_lock());
|
|
3891 if (ParallelGCVerbose) {
|
|
3892 gclog_or_tty->print("Thread %d complete:\n", i);
|
|
3893 #if G1_DETAILED_STATS
|
|
3894 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n",
|
|
3895 pss.pushes(),
|
|
3896 pss.pops(),
|
|
3897 pss.overflow_pushes(),
|
|
3898 pss.steals(),
|
|
3899 pss.steal_attempts());
|
|
3900 #endif
|
|
3901 double elapsed = pss.elapsed();
|
|
3902 double strong_roots = pss.strong_roots_time();
|
|
3903 double term = pss.term_time();
|
|
3904 gclog_or_tty->print(" Elapsed: %7.2f ms.\n"
|
|
3905 " Strong roots: %7.2f ms (%6.2f%%)\n"
|
|
3906 " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n",
|
|
3907 elapsed * 1000.0,
|
|
3908 strong_roots * 1000.0, (strong_roots*100.0/elapsed),
|
|
3909 term * 1000.0, (term*100.0/elapsed),
|
|
3910 pss.term_attempts());
|
|
3911 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste();
|
|
3912 gclog_or_tty->print(" Waste: %8dK\n"
|
|
3913 " Alloc Buffer: %8dK\n"
|
|
3914 " Undo: %8dK\n",
|
|
3915 (total_waste * HeapWordSize) / K,
|
|
3916 (pss.alloc_buffer_waste() * HeapWordSize) / K,
|
|
3917 (pss.undo_waste() * HeapWordSize) / K);
|
|
3918 }
|
|
3919
|
|
3920 assert(pss.refs_to_scan() == 0, "Task queue should be empty");
|
|
3921 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
|
|
3922 }
|
|
3923 };
|
|
3924
|
|
3925 // *** Common G1 Evacuation Stuff
|
|
3926
|
|
3927 class G1CountClosure: public OopsInHeapRegionClosure {
|
|
3928 public:
|
|
3929 int n;
|
|
3930 G1CountClosure() : n(0) {}
|
|
3931 void do_oop(narrowOop* p) {
|
|
3932 guarantee(false, "NYI");
|
|
3933 }
|
|
3934 void do_oop(oop* p) {
|
|
3935 oop obj = *p;
|
|
3936 assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj),
|
|
3937 "Rem set closure called on non-rem-set pointer.");
|
|
3938 n++;
|
|
3939 }
|
|
3940 };
|
|
3941
|
|
3942 static G1CountClosure count_closure;
|
|
3943
|
|
3944 void
|
|
3945 G1CollectedHeap::
|
|
3946 g1_process_strong_roots(bool collecting_perm_gen,
|
|
3947 SharedHeap::ScanningOption so,
|
|
3948 OopClosure* scan_non_heap_roots,
|
|
3949 OopsInHeapRegionClosure* scan_rs,
|
|
3950 OopsInHeapRegionClosure* scan_so,
|
|
3951 OopsInGenClosure* scan_perm,
|
|
3952 int worker_i) {
|
|
3953 // First scan the strong roots, including the perm gen.
|
|
3954 double ext_roots_start = os::elapsedTime();
|
|
3955 double closure_app_time_sec = 0.0;
|
|
3956
|
|
3957 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
|
|
3958 BufferingOopsInGenClosure buf_scan_perm(scan_perm);
|
|
3959 buf_scan_perm.set_generation(perm_gen());
|
|
3960
|
|
3961 process_strong_roots(collecting_perm_gen, so,
|
|
3962 &buf_scan_non_heap_roots,
|
|
3963 &buf_scan_perm);
|
|
3964 // Finish up any enqueued closure apps.
|
|
3965 buf_scan_non_heap_roots.done();
|
|
3966 buf_scan_perm.done();
|
|
3967 double ext_roots_end = os::elapsedTime();
|
|
3968 g1_policy()->reset_obj_copy_time(worker_i);
|
|
3969 double obj_copy_time_sec =
|
|
3970 buf_scan_non_heap_roots.closure_app_seconds() +
|
|
3971 buf_scan_perm.closure_app_seconds();
|
|
3972 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
|
|
3973 double ext_root_time_ms =
|
|
3974 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
|
|
3975 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
|
|
3976
|
|
3977 // Scan strong roots in mark stack.
|
|
3978 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
|
|
3979 concurrent_mark()->oops_do(scan_non_heap_roots);
|
|
3980 }
|
|
3981 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
|
|
3982 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
|
|
3983
|
|
3984 // XXX What should this be doing in the parallel case?
|
|
3985 g1_policy()->record_collection_pause_end_CH_strong_roots();
|
|
3986 if (G1VerifyRemSet) {
|
|
3987 // :::: FIXME ::::
|
|
3988 // The stupid remembered set doesn't know how to filter out dead
|
|
3989 // objects, which the smart one does, and so when it is created
|
|
3990 // and then compared the number of entries in each differs and
|
|
3991 // the verification code fails.
|
|
3992 guarantee(false, "verification code is broken, see note");
|
|
3993
|
|
3994 // Let's make sure that the current rem set agrees with the stupidest
|
|
3995 // one possible!
|
|
3996 bool refs_enabled = ref_processor()->discovery_enabled();
|
|
3997 if (refs_enabled) ref_processor()->disable_discovery();
|
|
3998 StupidG1RemSet stupid(this);
|
|
3999 count_closure.n = 0;
|
|
4000 stupid.oops_into_collection_set_do(&count_closure, worker_i);
|
|
4001 int stupid_n = count_closure.n;
|
|
4002 count_closure.n = 0;
|
|
4003 g1_rem_set()->oops_into_collection_set_do(&count_closure, worker_i);
|
|
4004 guarantee(count_closure.n == stupid_n, "Old and new rem sets differ.");
|
|
4005 gclog_or_tty->print_cr("\nFound %d pointers in heap RS.", count_closure.n);
|
|
4006 if (refs_enabled) ref_processor()->enable_discovery();
|
|
4007 }
|
|
4008 if (scan_so != NULL) {
|
|
4009 scan_scan_only_set(scan_so, worker_i);
|
|
4010 }
|
|
4011 // Now scan the complement of the collection set.
|
|
4012 if (scan_rs != NULL) {
|
|
4013 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
|
|
4014 }
|
|
4015 // Finish with the ref_processor roots.
|
|
4016 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
|
|
4017 ref_processor()->oops_do(scan_non_heap_roots);
|
|
4018 }
|
|
4019 g1_policy()->record_collection_pause_end_G1_strong_roots();
|
|
4020 _process_strong_tasks->all_tasks_completed();
|
|
4021 }
|
|
4022
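// NOTE: the BufferingOopClosure wrappers above exist for timing attribution:
// process_strong_roots() interleaves root discovery with closure application,
// so the roots are buffered and the closures applied in batches whose cost
// (closure_app_seconds()) can be subtracted out, letting the policy record
// "ext root scan" time and "obj copy" time separately for each worker.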
|
|
4023 void
|
|
4024 G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
|
|
4025 OopsInHeapRegionClosure* oc,
|
|
4026 int worker_i) {
|
|
4027 HeapWord* startAddr = r->bottom();
|
|
4028 HeapWord* endAddr = r->used_region().end();
|
|
4029
|
|
4030 oc->set_region(r);
|
|
4031
|
|
4032 HeapWord* p = r->bottom();
|
|
4033 HeapWord* t = r->top();
|
|
4034 guarantee( p == r->next_top_at_mark_start(), "invariant" );
|
|
4035 while (p < t) {
|
|
4036 oop obj = oop(p);
|
|
4037 p += obj->oop_iterate(oc);
|
|
4038 }
|
|
4039 }
|
|
4040
|
|
4041 void
|
|
4042 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
|
|
4043 int worker_i) {
|
|
4044 double start = os::elapsedTime();
|
|
4045
|
|
4046 BufferingOopsInHeapRegionClosure boc(oc);
|
|
4047
|
|
4048 FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
|
|
4049 FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
|
|
4050
|
|
4051 OopsInHeapRegionClosure *foc;
|
|
4052 if (g1_policy()->should_initiate_conc_mark())
|
|
4053 foc = &scan_and_mark;
|
|
4054 else
|
|
4055 foc = &scan_only;
|
|
4056
|
|
4057 HeapRegion* hr;
|
|
4058 int n = 0;
|
|
4059 while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
|
|
4060 scan_scan_only_region(hr, foc, worker_i);
|
|
4061 ++n;
|
|
4062 }
|
|
4063 boc.done();
|
|
4064
|
|
4065 double closure_app_s = boc.closure_app_seconds();
|
|
4066 g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
|
|
4067 double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
|
|
4068 g1_policy()->record_scan_only_time(worker_i, ms, n);
|
|
4069 }
|
|
4070
|
|
4071 void
|
|
4072 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
|
|
4073 OopClosure* non_root_closure) {
|
|
4074 SharedHeap::process_weak_roots(root_closure, non_root_closure);
|
|
4075 }
|
|
4076
|
|
4077
|
|
4078 class SaveMarksClosure: public HeapRegionClosure {
|
|
4079 public:
|
|
4080 bool doHeapRegion(HeapRegion* r) {
|
|
4081 r->save_marks();
|
|
4082 return false;
|
|
4083 }
|
|
4084 };
|
|
4085
|
|
4086 void G1CollectedHeap::save_marks() {
|
|
4087 if (ParallelGCThreads == 0) {
|
|
4088 SaveMarksClosure sm;
|
|
4089 heap_region_iterate(&sm);
|
|
4090 }
|
|
4091 // We do this even in the parallel case
|
|
4092 perm_gen()->save_marks();
|
|
4093 }
|
|
4094
|
|
4095 void G1CollectedHeap::evacuate_collection_set() {
|
|
4096 set_evacuation_failed(false);
|
|
4097
|
|
4098 g1_rem_set()->prepare_for_oops_into_collection_set_do();
|
|
4099 concurrent_g1_refine()->set_use_cache(false);
|
|
4100 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
|
|
4101
|
|
4102 set_par_threads(n_workers);
|
|
4103 G1ParTask g1_par_task(this, n_workers, _task_queues);
|
|
4104
|
|
4105 init_for_evac_failure(NULL);
|
|
4106
|
|
4107 change_strong_roots_parity(); // In preparation for parallel strong roots.
|
|
4108 rem_set()->prepare_for_younger_refs_iterate(true);
|
|
4109 double start_par = os::elapsedTime();
|
|
4110
|
|
4111 if (ParallelGCThreads > 0) {
|
|
4112 // The individual threads will set their evac-failure closures.
|
|
4113 workers()->run_task(&g1_par_task);
|
|
4114 } else {
|
|
4115 g1_par_task.work(0);
|
|
4116 }
|
|
4117
|
|
4118 double par_time = (os::elapsedTime() - start_par) * 1000.0;
|
|
4119 g1_policy()->record_par_time(par_time);
|
|
4120 set_par_threads(0);
|
|
4121 // Is this the right thing to do here? We don't save marks
|
|
4122 // on individual heap regions when we allocate from
|
|
4123 // them in parallel, so this seems like the correct place for this.
|
|
4124 all_alloc_regions_note_end_of_copying();
|
|
4125 {
|
|
4126 G1IsAliveClosure is_alive(this);
|
|
4127 G1KeepAliveClosure keep_alive(this);
|
|
4128 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
|
|
4129 }
|
|
4130
|
|
4131 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
|
|
4132 concurrent_g1_refine()->set_use_cache(true);
|
|
4133
|
|
4134 finalize_for_evac_failure();
|
|
4135
|
|
4136 // Must do this before removing self-forwarding pointers, which clears
|
|
4137 // the per-region evac-failure flags.
|
|
4138 concurrent_mark()->complete_marking_in_collection_set();
|
|
4139
|
|
4140 if (evacuation_failed()) {
|
|
4141 remove_self_forwarding_pointers();
|
|
4142
|
|
4143 if (PrintGCDetails) {
|
|
4144 gclog_or_tty->print(" (evacuation failed)");
|
|
4145 } else if (PrintGC) {
|
|
4146 gclog_or_tty->print("--");
|
|
4147 }
|
|
4148 }
|
|
4149
|
|
4150 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
|
|
4151 }
|
|
4152
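// NOTE: evacuate_collection_set() drives the copy phase of a pause: prepare
// the remembered set and disable the refinement cache, run G1ParTask on all
// workers (or inline in the serial case), process JNI weak handles, then
// restore the remembered set and cache. Note the ordering at the end:
// complete_marking_in_collection_set() must run while the per-region
// evacuation-failure flags are still set, and only then are self-forwarding
// pointers removed, since that removal clears those flags.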
|
|
4153 void G1CollectedHeap::free_region(HeapRegion* hr) {
|
|
4154 size_t pre_used = 0;
|
|
4155 size_t cleared_h_regions = 0;
|
|
4156 size_t freed_regions = 0;
|
|
4157 UncleanRegionList local_list;
|
|
4158
|
|
4159 HeapWord* start = hr->bottom();
|
|
4160 HeapWord* end = hr->prev_top_at_mark_start();
|
|
4161 size_t used_bytes = hr->used();
|
|
4162 size_t live_bytes = hr->max_live_bytes();
|
|
4163 if (used_bytes > 0) {
|
|
4164 guarantee( live_bytes <= used_bytes, "invariant" );
|
|
4165 } else {
|
|
4166 guarantee( live_bytes == 0, "invariant" );
|
|
4167 }
|
|
4168
|
|
4169 size_t garbage_bytes = used_bytes - live_bytes;
|
|
4170 if (garbage_bytes > 0)
|
|
4171 g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
|
|
4172
|
|
4173 free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
|
|
4174 &local_list);
|
|
4175 finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
|
|
4176 &local_list);
|
|
4177 }
|
|
4178
|
|
4179 void
|
|
4180 G1CollectedHeap::free_region_work(HeapRegion* hr,
|
|
4181 size_t& pre_used,
|
|
4182 size_t& cleared_h_regions,
|
|
4183 size_t& freed_regions,
|
|
4184 UncleanRegionList* list,
|
|
4185 bool par) {
|
|
4186 assert(!hr->popular(), "should not free popular regions");
|
|
4187 pre_used += hr->used();
|
|
4188 if (hr->isHumongous()) {
|
|
4189 assert(hr->startsHumongous(),
|
|
4190 "Only the start of a humongous region should be freed.");
|
|
4191 int ind = _hrs->find(hr);
|
|
4192 assert(ind != -1, "Should have an index.");
|
|
4193 // Clear the start region.
|
|
4194 hr->hr_clear(par, true /*clear_space*/);
|
|
4195 list->insert_before_head(hr);
|
|
4196 cleared_h_regions++;
|
|
4197 freed_regions++;
|
|
4198 // Clear any continued regions.
|
|
4199 ind++;
|
|
4200 while ((size_t)ind < n_regions()) {
|
|
4201 HeapRegion* hrc = _hrs->at(ind);
|
|
4202 if (!hrc->continuesHumongous()) break;
|
|
4203 // Otherwise, it continues the humongous region.
|
|
4204 assert(hrc->humongous_start_region() == hr, "Huh?");
|
|
4205 hrc->hr_clear(par, true /*clear_space*/);
|
|
4206 cleared_h_regions++;
|
|
4207 freed_regions++;
|
|
4208 list->insert_before_head(hrc);
|
|
4209 ind++;
|
|
4210 }
|
|
4211 } else {
|
|
4212 hr->hr_clear(par, true /*clear_space*/);
|
|
4213 list->insert_before_head(hr);
|
|
4214 freed_regions++;
|
|
4215 // If we're using clear2, this should not be enabled.
|
|
4216 // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
|
|
4217 }
|
|
4218 }
|
|
4219
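// NOTE: free_region_work() accumulates its effects in the caller's pre_used /
// cleared_h_regions / freed_regions counters and the local UncleanRegionList
// instead of updating shared state directly; for a humongous object it clears
// the "starts humongous" region and every "continues humongous" region that
// follows it. finish_free_region_work() then folds the counters into the
// shared totals, taking ParGCRareEvent_lock only when parallel GC threads are
// in use.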
|
|
4220 void G1CollectedHeap::finish_free_region_work(size_t pre_used,
|
|
4221 size_t cleared_h_regions,
|
|
4222 size_t freed_regions,
|
|
4223 UncleanRegionList* list) {
|
|
4224 if (list != NULL && list->sz() > 0) {
|
|
4225 prepend_region_list_on_unclean_list(list);
|
|
4226 }
|
|
4227 // Acquire a lock, if we're parallel, to update possibly-shared
|
|
4228 // variables.
|
|
4229 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
|
|
4230 {
|
|
4231 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
|
|
4232 _summary_bytes_used -= pre_used;
|
|
4233 _num_humongous_regions -= (int) cleared_h_regions;
|
|
4234 _free_regions += freed_regions;
|
|
4235 }
|
|
4236 }
|
|
4237
|
|
4238
|
|
4239 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
|
|
4240 while (list != NULL) {
|
|
4241 guarantee( list->is_young(), "invariant" );
|
|
4242
|
|
4243 HeapWord* bottom = list->bottom();
|
|
4244 HeapWord* end = list->end();
|
|
4245 MemRegion mr(bottom, end);
|
|
4246 ct_bs->dirty(mr);
|
|
4247
|
|
4248 list = list->get_next_young_region();
|
|
4249 }
|
|
4250 }
|
|
4251
|
|
4252 void G1CollectedHeap::cleanUpCardTable() {
|
|
4253 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
|
|
4254 double start = os::elapsedTime();
|
|
4255
|
|
4256 ct_bs->clear(_g1_committed);
|
|
4257
|
|
4258 // now, redirty the cards of the scan-only and survivor regions
|
|
4259 // (it seemed faster to do it this way, instead of iterating over
|
|
4260 // all regions and then clearing / dirtying as appropriate)
|
|
4261 dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
|
|
4262 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
|
|
4263
|
|
4264 double elapsed = os::elapsedTime() - start;
|
|
4265 g1_policy()->record_clear_ct_time( elapsed * 1000.0);
|
|
4266 }
|
|
4267
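// NOTE: cleanUpCardTable() wipes the whole committed card table in one pass
// and then re-dirties only the cards of the scan-only and survivor young
// regions, rather than walking every region and deciding card by card; per
// the comment above, this was simply measured to be the faster way to leave
// the young regions' cards dirty after the pause.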
|
|
4268
|
|
4269 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
|
|
4270 // First do any popular regions.
|
|
4271 HeapRegion* hr;
|
|
4272 while ((hr = popular_region_to_evac()) != NULL) {
|
|
4273 evac_popular_region(hr);
|
|
4274 }
|
|
4275 // Now do heuristic pauses.
|
|
4276 if (g1_policy()->should_do_collection_pause(word_size)) {
|
|
4277 do_collection_pause();
|
|
4278 }
|
|
4279 }
|
|
4280
|
|
4281 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
|
|
4282 double young_time_ms = 0.0;
|
|
4283 double non_young_time_ms = 0.0;
|
|
4284
|
|
4285 G1CollectorPolicy* policy = g1_policy();
|
|
4286
|
|
4287 double start_sec = os::elapsedTime();
|
|
4288 bool non_young = true;
|
|
4289
|
|
4290 HeapRegion* cur = cs_head;
|
|
4291 int age_bound = -1;
|
|
4292 size_t rs_lengths = 0;
|
|
4293
|
|
4294 while (cur != NULL) {
|
|
4295 if (non_young) {
|
|
4296 if (cur->is_young()) {
|
|
4297 double end_sec = os::elapsedTime();
|
|
4298 double elapsed_ms = (end_sec - start_sec) * 1000.0;
|
|
4299 non_young_time_ms += elapsed_ms;
|
|
4300
|
|
4301 start_sec = os::elapsedTime();
|
|
4302 non_young = false;
|
|
4303 }
|
|
4304 } else {
|
|
4305 if (!cur->is_on_free_list()) {
|
|
4306 double end_sec = os::elapsedTime();
|
|
4307 double elapsed_ms = (end_sec - start_sec) * 1000.0;
|
|
4308 young_time_ms += elapsed_ms;
|
|
4309
|
|
4310 start_sec = os::elapsedTime();
|
|
4311 non_young = true;
|
|
4312 }
|
|
4313 }
|
|
4314
|
|
4315 rs_lengths += cur->rem_set()->occupied();
|
|
4316
|
|
4317 HeapRegion* next = cur->next_in_collection_set();
|
|
4318 assert(cur->in_collection_set(), "bad CS");
|
|
4319 cur->set_next_in_collection_set(NULL);
|
|
4320 cur->set_in_collection_set(false);
|
|
4321
|
|
4322 if (cur->is_young()) {
|
|
4323 int index = cur->young_index_in_cset();
|
|
4324 guarantee( index != -1, "invariant" );
|
|
4325 guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
|
|
4326 size_t words_survived = _surviving_young_words[index];
|
|
4327 cur->record_surv_words_in_group(words_survived);
|
|
4328 } else {
|
|
4329 int index = cur->young_index_in_cset();
|
|
4330 guarantee( index == -1, "invariant" );
|
|
4331 }
|
|
4332
|
|
4333 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
|
|
4334 (!cur->is_young() && cur->young_index_in_cset() == -1),
|
|
4335 "invariant" );
|
|
4336
|
|
4337 if (!cur->evacuation_failed()) {
|
|
4338 // The region should not be empty at this point.
|
|
4339 assert(!cur->is_empty(),
|
|
4340 "Should not have empty regions in a CS.");
|
|
4341 free_region(cur);
|
|
4342 } else {
|
|
4343 guarantee( !cur->is_scan_only(), "should not be scan only" );
|
|
4344 cur->uninstall_surv_rate_group();
|
|
4345 if (cur->is_young())
|
|
4346 cur->set_young_index_in_cset(-1);
|
|
4347 cur->set_not_young();
|
|
4348 cur->set_evacuation_failed(false);
|
|
4349 }
|
|
4350 cur = next;
|
|
4351 }
|
|
4352
|
|
4353 policy->record_max_rs_lengths(rs_lengths);
|
|
4354 policy->cset_regions_freed();
|
|
4355
|
|
4356 double end_sec = os::elapsedTime();
|
|
4357 double elapsed_ms = (end_sec - start_sec) * 1000.0;
|
|
4358 if (non_young)
|
|
4359 non_young_time_ms += elapsed_ms;
|
|
4360 else
|
|
4361 young_time_ms += elapsed_ms;
|
|
4362
|
|
4363 policy->record_young_free_cset_time_ms(young_time_ms);
|
|
4364 policy->record_non_young_free_cset_time_ms(non_young_time_ms);
|
|
4365 }
|
|
4366
|
|
4367 HeapRegion*
|
|
4368 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
|
|
4369 assert(ZF_mon->owned_by_self(), "Precondition");
|
|
4370 HeapRegion* res = pop_unclean_region_list_locked();
|
|
4371 if (res != NULL) {
|
|
4372 assert(!res->continuesHumongous() &&
|
|
4373 res->zero_fill_state() != HeapRegion::Allocated,
|
|
4374 "Only free regions on unclean list.");
|
|
4375 if (zero_filled) {
|
|
4376 res->ensure_zero_filled_locked();
|
|
4377 res->set_zero_fill_allocated();
|
|
4378 }
|
|
4379 }
|
|
4380 return res;
|
|
4381 }
|
|
4382
|
|
4383 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
|
|
4384 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
|
|
4385 return alloc_region_from_unclean_list_locked(zero_filled);
|
|
4386 }
|
|
4387
|
|
4388 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
|
|
4389 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
|
|
4390 put_region_on_unclean_list_locked(r);
|
|
4391 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
|
|
4392 }
|
|
4393
|
|
4394 void G1CollectedHeap::set_unclean_regions_coming(bool b) {
|
|
4395 MutexLockerEx x(Cleanup_mon);
|
|
4396 set_unclean_regions_coming_locked(b);
|
|
4397 }
|
|
4398
|
|
4399 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
|
|
4400 assert(Cleanup_mon->owned_by_self(), "Precondition");
|
|
4401 _unclean_regions_coming = b;
|
|
4402 // Wake up mutator threads that might be waiting for completeCleanup to
|
|
4403 // finish.
|
|
4404 if (!b) Cleanup_mon->notify_all();
|
|
4405 }
|
|
4406
|
|
4407 void G1CollectedHeap::wait_for_cleanup_complete() {
|
|
4408 MutexLockerEx x(Cleanup_mon);
|
|
4409 wait_for_cleanup_complete_locked();
|
|
4410 }
|
|
4411
|
|
4412 void G1CollectedHeap::wait_for_cleanup_complete_locked() {
|
|
4413 assert(Cleanup_mon->owned_by_self(), "precondition");
|
|
4414 while (_unclean_regions_coming) {
|
|
4415 Cleanup_mon->wait();
|
|
4416 }
|
|
4417 }
|
|
4418
|
|
4419 void
|
|
4420 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
|
|
4421 assert(ZF_mon->owned_by_self(), "precondition.");
|
|
4422 _unclean_region_list.insert_before_head(r);
|
|
4423 }
|
|
4424
|
|
4425 void
|
|
4426 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
|
|
4427 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
|
|
4428 prepend_region_list_on_unclean_list_locked(list);
|
|
4429 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
|
|
4430 }
|
|
4431
|
|
4432 void
|
|
4433 G1CollectedHeap::
|
|
4434 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
|
|
4435 assert(ZF_mon->owned_by_self(), "precondition.");
|
|
4436 _unclean_region_list.prepend_list(list);
|
|
4437 }
|
|
4438
|
|
4439 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
|
|
4440 assert(ZF_mon->owned_by_self(), "precondition.");
|
|
4441 HeapRegion* res = _unclean_region_list.pop();
|
|
4442 if (res != NULL) {
|
|
4443 // Inform ZF thread that there's a new unclean head.
|
|
4444 if (_unclean_region_list.hd() != NULL && should_zf())
|
|
4445 ZF_mon->notify_all();
|
|
4446 }
|
|
4447 return res;
|
|
4448 }
|
|
4449
|
|
4450 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
|
|
4451 assert(ZF_mon->owned_by_self(), "precondition.");
|
|
4452 return _unclean_region_list.hd();
|
|
4453 }

bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
  assert(ZF_mon->owned_by_self(), "Precondition");
  HeapRegion* r = peek_unclean_region_list_locked();
  if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
    // Result of below must be equal to "r", since we hold the lock.
    (void)pop_unclean_region_list_locked();
    put_free_region_on_list_locked(r);
    return true;
  } else {
    return false;
  }
}

bool G1CollectedHeap::move_cleaned_region_to_free_list() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  return move_cleaned_region_to_free_list_locked();
}

void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
  assert(ZF_mon->owned_by_self(), "precondition.");
  assert(_free_region_list_size == free_region_list_length(), "Inv");
  assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
         "Regions on free list must be zero filled");
  assert(!r->isHumongous(), "Must not be humongous.");
  assert(r->is_empty(), "Better be empty");
  assert(!r->is_on_free_list(),
         "Better not already be on free list");
  assert(!r->is_on_unclean_list(),
         "Better not already be on unclean list");
  r->set_on_free_list(true);
  r->set_next_on_free_list(_free_region_list);
  _free_region_list = r;
  _free_region_list_size++;
  assert(_free_region_list_size == free_region_list_length(), "Inv");
}

void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  put_free_region_on_list_locked(r);
}

HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  assert(_free_region_list_size == free_region_list_length(), "Inv");
  HeapRegion* res = _free_region_list;
  if (res != NULL) {
    _free_region_list = res->next_from_free_list();
    _free_region_list_size--;
    res->set_on_free_list(false);
    res->set_next_on_free_list(NULL);
    assert(_free_region_list_size == free_region_list_length(), "Inv");
  }
  return res;
}

HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
  // By self, or on behalf of self.
  assert(Heap_lock->is_locked(), "Precondition");
  HeapRegion* res = NULL;
  bool first = true;
  while (res == NULL) {
    if (zero_filled || !first) {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
      res = pop_free_region_list_locked();
      if (res != NULL) {
        assert(!res->zero_fill_is_allocated(),
               "No allocated regions on free list.");
        res->set_zero_fill_allocated();
      } else if (!first) {
        break;  // We tried both, time to return NULL.
      }
    }

    if (res == NULL) {
      res = alloc_region_from_unclean_list(zero_filled);
    }
    assert(res == NULL ||
           !zero_filled ||
           res->zero_fill_is_allocated(),
           "We must have allocated the region we're returning");
    first = false;
  }
  return res;
}
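
// Note on alloc_free_region_from_lists(): the loop above makes at most two
// passes.  On the first pass the free list is consulted only when a
// zero-filled region was requested; otherwise (or if the free list is
// empty) we fall back to alloc_region_from_unclean_list().  On the second
// pass the free list is tried unconditionally, and we give up (return NULL)
// if it is empty too.  Regions handed out from the free list are marked
// with set_zero_fill_allocated(), which is what later lets
// remove_allocated_regions_from_lists() below prune them from the lists.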

void G1CollectedHeap::remove_allocated_regions_from_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  {
    HeapRegion* prev = NULL;
    HeapRegion* cur = _unclean_region_list.hd();
    while (cur != NULL) {
      HeapRegion* next = cur->next_from_unclean_list();
      if (cur->zero_fill_is_allocated()) {
        // Remove from the list.
        if (prev == NULL) {
          (void)_unclean_region_list.pop();
        } else {
          _unclean_region_list.delete_after(prev);
        }
        cur->set_on_unclean_list(false);
        cur->set_next_on_unclean_list(NULL);
      } else {
        prev = cur;
      }
      cur = next;
    }
    assert(_unclean_region_list.sz() == unclean_region_list_length(),
           "Inv");
  }

  {
    HeapRegion* prev = NULL;
    HeapRegion* cur = _free_region_list;
    while (cur != NULL) {
      HeapRegion* next = cur->next_from_free_list();
      if (cur->zero_fill_is_allocated()) {
        // Remove from the list.
        if (prev == NULL) {
          _free_region_list = cur->next_from_free_list();
        } else {
          prev->set_next_on_free_list(cur->next_from_free_list());
        }
        cur->set_on_free_list(false);
        cur->set_next_on_free_list(NULL);
        _free_region_list_size--;
      } else {
        prev = cur;
      }
      cur = next;
    }
    assert(_free_region_list_size == free_region_list_length(), "Inv");
  }
}

bool G1CollectedHeap::verify_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  return verify_region_lists_locked();
}

bool G1CollectedHeap::verify_region_lists_locked() {
  HeapRegion* unclean = _unclean_region_list.hd();
  while (unclean != NULL) {
    guarantee(unclean->is_on_unclean_list(), "Well, it is!");
    guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
    guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
              "Everything else is possible.");
    unclean = unclean->next_from_unclean_list();
  }
  guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");

  HeapRegion* free_r = _free_region_list;
  while (free_r != NULL) {
    assert(free_r->is_on_free_list(), "Well, it is!");
    assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
    switch (free_r->zero_fill_state()) {
    case HeapRegion::NotZeroFilled:
    case HeapRegion::ZeroFilling:
      guarantee(false, "Should not be on free list.");
      break;
    default:
      // Everything else is possible.
      break;
    }
    free_r = free_r->next_from_free_list();
  }
  guarantee(_free_region_list_size == free_region_list_length(), "Inv");
  // If we didn't do an assertion...
  return true;
}

size_t G1CollectedHeap::free_region_list_length() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  size_t len = 0;
  HeapRegion* cur = _free_region_list;
  while (cur != NULL) {
    len++;
    cur = cur->next_from_free_list();
  }
  return len;
}

size_t G1CollectedHeap::unclean_region_list_length() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  return _unclean_region_list.length();
}

size_t G1CollectedHeap::n_regions() {
  return _hrs->length();
}

size_t G1CollectedHeap::max_regions() {
  return
    (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
    HeapRegion::GrainBytes;
}

size_t G1CollectedHeap::free_regions() {
  /* Possibly-expensive assert.
  assert(_free_regions == count_free_regions(),
         "_free_regions is off.");
  */
  return _free_regions;
}

bool G1CollectedHeap::should_zf() {
  return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
}
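
// Note: should_zf() is the predicate checked before waking the zero-fill
// thread (see the ZF_mon->notify_all() calls above and in
// rebuild_region_lists() below): concurrent zero filling is worthwhile only
// while the free list holds fewer than G1ConcZFMaxRegions ready regions.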

class RegionCounter: public HeapRegionClosure {
  size_t _n;
public:
  RegionCounter() : _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_empty() && !r->popular()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      _n++;
    }
    return false;
  }
  int res() { return (int) _n; }
};

size_t G1CollectedHeap::count_free_regions() {
  RegionCounter rc;
  heap_region_iterate(&rc);
  size_t n = rc.res();
  if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
    n--;
  return n;
}

size_t G1CollectedHeap::count_free_regions_list() {
  size_t n = 0;
  size_t o = 0;
  ZF_mon->lock_without_safepoint_check();
  HeapRegion* cur = _free_region_list;
  while (cur != NULL) {
    cur = cur->next_from_free_list();
    n++;
  }
  size_t m = unclean_region_list_length();
  ZF_mon->unlock();
  return n + m;
}
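
// Note: count_free_regions() and count_free_regions_list() answer the same
// question by different routes.  The former walks every region via
// heap_region_iterate() and counts empty, non-popular regions (discounting
// the current allocation region); the latter walks the free and unclean
// lists under ZF_mon.  print_region_accounting_info() below prints both, so
// any drift in the region accounting shows up in the log.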

bool G1CollectedHeap::should_set_young_locked() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  return (g1_policy()->in_young_gc_mode() &&
          g1_policy()->should_add_next_region_to_young_list());
}

void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  _young_list->push_region(hr);
  g1_policy()->set_region_short_lived(hr);
}

class NoYoungRegionsClosure: public HeapRegionClosure {
private:
  bool _success;
public:
  NoYoungRegionsClosure() : _success(true) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_young()) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
                             r->bottom(), r->end());
      _success = false;
    }
    return false;
  }
  bool success() { return _success; }
};

bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
                                             bool check_sample) {
  bool ret = true;

  ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
  if (!ignore_scan_only_list) {
    NoYoungRegionsClosure closure;
    heap_region_iterate(&closure);
    ret = ret && closure.success();
  }

  return ret;
}

void G1CollectedHeap::empty_young_list() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");

  _young_list->empty_list();
}

bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
  bool no_allocs = true;
  for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    no_allocs = r == NULL || r->saved_mark_at_top();
  }
  return no_allocs;
}

void G1CollectedHeap::all_alloc_regions_note_end_of_copying() {
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    if (r != NULL) {
      // Check for aliases.
      bool has_processed_alias = false;
      for (int i = 0; i < ap; ++i) {
        if (_gc_alloc_regions[i] == r) {
          has_processed_alias = true;
          break;
        }
      }
      if (!has_processed_alias) {
        r->note_end_of_copying();
        g1_policy()->record_after_bytes(r->used());
      }
    }
  }
}
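
// Note: the alias check in all_alloc_regions_note_end_of_copying() exists
// because the same HeapRegion may serve more than one GC allocation
// purpose; without it, a shared region would have note_end_of_copying()
// applied, and its used() bytes recorded, once per purpose rather than once
// per region.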

// Done at the start of full GC.
void G1CollectedHeap::tear_down_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  while (pop_unclean_region_list_locked() != NULL) ;
  assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
         "Postconditions of loop.");
  while (pop_free_region_list_locked() != NULL) ;
  assert(_free_region_list == NULL, "Postcondition of loop.");
  if (_free_region_list_size != 0) {
    gclog_or_tty->print_cr("Size is %d.", _free_region_list_size);
    print();
  }
  assert(_free_region_list_size == 0, "Postconditions of loop.");
}
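
// Note: tear_down_region_lists() above and rebuild_region_lists() below
// bracket a full collection: the free and unclean lists are emptied before
// the heap is rebuilt, and RegionResetter repopulates them afterwards from
// the post-GC state of each region.  set_used_regions_to_need_zero_fill()
// below likewise runs at the start of a full GC, per the comments on these
// functions.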

class RegionResetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;
public:
  RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      if (r->top() < r->end()) {
        Copy::fill_to_words(r->top(),
                            pointer_delta(r->end(), r->top()));
      }
      r->set_zero_fill_allocated();
    } else {
      assert(r->is_empty(), "tautology");
      if (r->popular()) {
        if (r->zero_fill_state() != HeapRegion::Allocated) {
          r->ensure_zero_filled_locked();
          r->set_zero_fill_allocated();
        }
      } else {
        _n++;
        switch (r->zero_fill_state()) {
        case HeapRegion::NotZeroFilled:
        case HeapRegion::ZeroFilling:
          _g1->put_region_on_unclean_list_locked(r);
          break;
        case HeapRegion::Allocated:
          r->set_zero_fill_complete();
          // no break; go on to put on free list.
        case HeapRegion::ZeroFilled:
          _g1->put_free_region_on_list_locked(r);
          break;
        }
      }
    }
    return false;
  }

  int getFreeRegionCount() { return _n; }
};

// Done at the end of full GC.
void G1CollectedHeap::rebuild_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // This needs to go at the end of the full GC.
  RegionResetter rs;
  heap_region_iterate(&rs);
  _free_regions = rs.getFreeRegionCount();
  // Tell the ZF thread it may have work to do.
  if (should_zf()) ZF_mon->notify_all();
}

class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;
public:
  UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      // There are assertions in "set_zero_fill_needed()" below that
      // require top() == bottom(), so this is technically illegal.
      // We'll skirt the law here, by making that true temporarily.
      DEBUG_ONLY(HeapWord* save_top = r->top();
                 r->set_top(r->bottom()));
      r->set_zero_fill_needed();
      DEBUG_ONLY(r->set_top(save_top));
    }
    return false;
  }
};

// Done at the start of full GC.
void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // This needs to go at the start of the full GC.
  UsedRegionsNeedZeroFillSetter rs;
  heap_region_iterate(&rs);
}

class CountObjClosure: public ObjectClosure {
  size_t _n;
public:
  CountObjClosure() : _n(0) {}
  void do_object(oop obj) { _n++; }
  size_t n() { return _n; }
};

size_t G1CollectedHeap::pop_object_used_objs() {
  size_t sum_objs = 0;
  for (int i = 0; i < G1NumPopularRegions; i++) {
    CountObjClosure cl;
    _hrs->at(i)->object_iterate(&cl);
    sum_objs += cl.n();
  }
  return sum_objs;
}

size_t G1CollectedHeap::pop_object_used_bytes() {
  size_t sum_bytes = 0;
  for (int i = 0; i < G1NumPopularRegions; i++) {
    sum_bytes += _hrs->at(i)->used();
  }
  return sum_bytes;
}


static int nq = 0;

HeapWord* G1CollectedHeap::allocate_popular_object(size_t word_size) {
  while (_cur_pop_hr_index < G1NumPopularRegions) {
    HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index);
    HeapWord* res = cur_pop_region->allocate(word_size);
    if (res != NULL) {
      // We account for popular objs directly in the used summary:
      _summary_bytes_used += (word_size * HeapWordSize);
      return res;
    }
    // Otherwise, try the next region (first making sure that we remember
    // the last "top" value as the "next_top_at_mark_start", so that
    // objects made popular during markings aren't automatically considered
    // live).
    cur_pop_region->note_end_of_copying();
    // Otherwise, try the next region.
    _cur_pop_hr_index++;
  }
  // XXX: For now !!!
  vm_exit_out_of_memory(word_size,
                        "Not enough pop obj space (To Be Fixed)");
  return NULL;
}
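
// Note: popular objects live in the first G1NumPopularRegions regions of
// the heap, which allocate_popular_object() fills in order using
// _cur_pop_hr_index.  Their space is charged directly to
// _summary_bytes_used, and exhausting the popular-object space is currently
// fatal, as the vm_exit_out_of_memory() call above indicates.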

class HeapRegionList: public CHeapObj {
public:
  HeapRegion* hr;
  HeapRegionList* next;
};

void G1CollectedHeap::schedule_popular_region_evac(HeapRegion* r) {
  // This might happen during parallel GC, so protect by this lock.
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  // We don't schedule regions whose evacuations are already pending, or
  // are already being evacuated.
  if (!r->popular_pending() && !r->in_collection_set()) {
    r->set_popular_pending(true);
    if (G1TracePopularity) {
      gclog_or_tty->print_cr("Scheduling region "PTR_FORMAT" "
                             "["PTR_FORMAT", "PTR_FORMAT") for pop-object evacuation.",
                             r, r->bottom(), r->end());
    }
    HeapRegionList* hrl = new HeapRegionList;
    hrl->hr = r;
    hrl->next = _popular_regions_to_be_evacuated;
    _popular_regions_to_be_evacuated = hrl;
  }
}

HeapRegion* G1CollectedHeap::popular_region_to_evac() {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  HeapRegion* res = NULL;
  while (_popular_regions_to_be_evacuated != NULL && res == NULL) {
    HeapRegionList* hrl = _popular_regions_to_be_evacuated;
    _popular_regions_to_be_evacuated = hrl->next;
    res = hrl->hr;
    // The G1RSPopLimit may have increased, so recheck here...
    if (res->rem_set()->occupied() < (size_t) G1RSPopLimit) {
      // Hah: don't need to schedule.
      if (G1TracePopularity) {
        gclog_or_tty->print_cr("Unscheduling region "PTR_FORMAT" "
                               "["PTR_FORMAT", "PTR_FORMAT") "
                               "for pop-object evacuation (size %d < limit %d)",
                               res, res->bottom(), res->end(),
                               res->rem_set()->occupied(), G1RSPopLimit);
      }
      res->set_popular_pending(false);
      res = NULL;
    }
    // We do not reset res->popular() here; if we did so, it would allow
    // the region to be "rescheduled" for popularity evacuation. Instead,
    // this is done in the collection pause, with the world stopped.
    // So the invariant is that the regions in the list have the popularity
    // boolean set, but having the boolean set does not imply membership
    // on the list (though there can be at most one such pop-pending region
    // not on the list at any time).
    delete hrl;
  }
  return res;
}
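
// Note: schedule_popular_region_evac() and popular_region_to_evac() form a
// simple producer/consumer list of HeapRegionList nodes guarded by
// ParGCRareEvent_lock.  A region is enqueued at most once (popular_pending
// acts as the "already scheduled" flag), and it is quietly dropped again at
// pop time if its remembered-set occupancy no longer reaches G1RSPopLimit,
// since the limit may have been raised after the region was scheduled.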

void G1CollectedHeap::evac_popular_region(HeapRegion* hr) {
  while (true) {
    // Don't want to do a GC pause while cleanup is being completed!
    wait_for_cleanup_complete();

    // Read the GC count while holding the Heap_lock.
    int gc_count_before = SharedHeap::heap()->total_collections();
    g1_policy()->record_stop_world_start();

    {
      MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
      VM_G1PopRegionCollectionPause op(gc_count_before, hr);
      VMThread::execute(&op);

      // If the prologue succeeded, we didn't do a GC for this.
      if (op.prologue_succeeded()) break;
    }
    // Otherwise we didn't.  We should recheck the size, though, since
    // the limit may have increased...
    if (hr->rem_set()->occupied() < (size_t) G1RSPopLimit) {
      hr->set_popular_pending(false);
      break;
    }
  }
}

void G1CollectedHeap::atomic_inc_obj_rc(oop obj) {
  Atomic::inc(obj_rc_addr(obj));
}

class CountRCClosure: public OopsInHeapRegionClosure {
  G1CollectedHeap* _g1h;
  bool _parallel;
public:
  CountRCClosure(G1CollectedHeap* g1h) :
    _g1h(g1h), _parallel(ParallelGCThreads > 0)
  {}
  void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  void do_oop(oop* p) {
    oop obj = *p;
    assert(obj != NULL, "Precondition.");
    if (_parallel) {
      // We go sticky at the limit to avoid excess contention.
      // If we want to track the actual RCs further, we'll need to keep a
      // per-thread hash table or something for the popular objects.
      if (_g1h->obj_rc(obj) < G1ObjPopLimit) {
        _g1h->atomic_inc_obj_rc(obj);
      }
    } else {
      _g1h->inc_obj_rc(obj);
    }
  }
};
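
// Note: the per-object reference counts used here live in a scratch "RC
// region" set up by compute_reference_counts_and_evac_popular() below;
// _rc_region_above / _rc_region_diff record where it sits relative to the
// candidate region, and obj_rc() / inc_obj_rc() / atomic_inc_obj_rc()
// (declared elsewhere) presumably index into it using that offset.  In the
// parallel case the count deliberately saturates at G1ObjPopLimit, as the
// comment in the closure above explains, since all we need to know is
// whether an object crossed the popularity threshold.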

class EvacPopObjClosure: public ObjectClosure {
  G1CollectedHeap* _g1h;
  size_t _pop_objs;
  size_t _max_rc;
public:
  EvacPopObjClosure(G1CollectedHeap* g1h) :
    _g1h(g1h), _pop_objs(0), _max_rc(0) {}

  void do_object(oop obj) {
    size_t rc = _g1h->obj_rc(obj);
    _max_rc = MAX2(rc, _max_rc);
    if (rc >= (size_t) G1ObjPopLimit) {
      _g1h->_pop_obj_rc_at_copy.add((double)rc);
      size_t word_sz = obj->size();
      HeapWord* new_pop_loc = _g1h->allocate_popular_object(word_sz);
      oop new_pop_obj = (oop)new_pop_loc;
      Copy::aligned_disjoint_words((HeapWord*)obj, new_pop_loc, word_sz);
      obj->forward_to(new_pop_obj);
      G1ScanAndBalanceClosure scan_and_balance(_g1h);
      new_pop_obj->oop_iterate_backwards(&scan_and_balance);
      // preserve "next" mark bit if marking is in progress.
      if (_g1h->mark_in_progress() && !_g1h->is_obj_ill(obj)) {
        _g1h->concurrent_mark()->markAndGrayObjectIfNecessary(new_pop_obj);
      }

      if (G1TracePopularity) {
        gclog_or_tty->print_cr("Found obj " PTR_FORMAT " of word size " SIZE_FORMAT
                               " pop (%d), move to " PTR_FORMAT,
                               (void*) obj, word_sz,
                               _g1h->obj_rc(obj), (void*) new_pop_obj);
      }
      _pop_objs++;
    }
  }
  size_t pop_objs() { return _pop_objs; }
  size_t max_rc() { return _max_rc; }
};
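
// Note: EvacPopObjClosure performs the actual move for each object whose
// reference count reached G1ObjPopLimit: it copies the object into the
// popular-object area, installs a forwarding pointer, rescans the copy's
// fields with G1ScanAndBalanceClosure, and, if concurrent marking is in
// progress, marks/grays the new copy so the marking cycle does not lose it.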

class G1ParCountRCTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  BitMap _bm;

  size_t getNCards() {
    return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
      / G1BlockOffsetSharedArray::N_bytes;
  }
  CountRCClosure _count_rc_closure;
public:
  G1ParCountRCTask(G1CollectedHeap* g1h) :
    AbstractGangTask("G1 Par RC Count task"),
    _g1h(g1h), _bm(getNCards()), _count_rc_closure(g1h)
  {}

  void work(int i) {
    ResourceMark rm;
    HandleMark   hm;
    _g1h->g1_rem_set()->oops_into_collection_set_do(&_count_rc_closure, i);
  }
};

void G1CollectedHeap::popularity_pause_preamble(HeapRegion* popular_region) {
  // We're evacuating a single region (for popularity).
  if (G1TracePopularity) {
    gclog_or_tty->print_cr("Doing pop region pause for ["PTR_FORMAT", "PTR_FORMAT")",
                           popular_region->bottom(), popular_region->end());
  }
  g1_policy()->set_single_region_collection_set(popular_region);
  size_t max_rc;
  if (!compute_reference_counts_and_evac_popular(popular_region,
                                                 &max_rc)) {
    // We didn't evacuate any popular objects.
    // We increase the RS popularity limit, to prevent this from
    // happening in the future.
    if (G1RSPopLimit < (1 << 30)) {
      G1RSPopLimit *= 2;
    }
    // For now, interesting enough for a message:
#if 1
    gclog_or_tty->print_cr("In pop region pause for ["PTR_FORMAT", "PTR_FORMAT"), "
                           "failed to find a pop object (max = %d).",
                           popular_region->bottom(), popular_region->end(),
                           max_rc);
    gclog_or_tty->print_cr("Increased G1RSPopLimit to %d.", G1RSPopLimit);
#endif // 1
    // Also, we reset the collection set to NULL, to make the rest of
    // the collection do nothing.
    assert(popular_region->next_in_collection_set() == NULL,
           "should be single-region.");
    popular_region->set_in_collection_set(false);
    popular_region->set_popular_pending(false);
    g1_policy()->clear_collection_set();
  }
}

bool G1CollectedHeap::
compute_reference_counts_and_evac_popular(HeapRegion* popular_region,
                                          size_t* max_rc) {
  HeapWord* rc_region_bot;
  HeapWord* rc_region_end;

  // Set up the reference count region.
  HeapRegion* rc_region = newAllocRegion(HeapRegion::GrainWords);
  if (rc_region != NULL) {
    rc_region_bot = rc_region->bottom();
    rc_region_end = rc_region->end();
  } else {
    rc_region_bot = NEW_C_HEAP_ARRAY(HeapWord, HeapRegion::GrainWords);
    if (rc_region_bot == NULL) {
      vm_exit_out_of_memory(HeapRegion::GrainWords,
                            "No space for RC region.");
    }
    rc_region_end = rc_region_bot + HeapRegion::GrainWords;
  }

  if (G1TracePopularity)
    gclog_or_tty->print_cr("RC region is ["PTR_FORMAT", "PTR_FORMAT")",
                           rc_region_bot, rc_region_end);
  if (rc_region_bot > popular_region->bottom()) {
    _rc_region_above = true;
    _rc_region_diff =
      pointer_delta(rc_region_bot, popular_region->bottom(), 1);
  } else {
    assert(rc_region_bot < popular_region->bottom(), "Can't be equal.");
    _rc_region_above = false;
    _rc_region_diff =
      pointer_delta(popular_region->bottom(), rc_region_bot, 1);
  }
  g1_policy()->record_pop_compute_rc_start();
  // Count external references.
  g1_rem_set()->prepare_for_oops_into_collection_set_do();
  if (ParallelGCThreads > 0) {

    set_par_threads(workers()->total_workers());
    G1ParCountRCTask par_count_rc_task(this);
    workers()->run_task(&par_count_rc_task);
    set_par_threads(0);

  } else {
    CountRCClosure count_rc_closure(this);
    g1_rem_set()->oops_into_collection_set_do(&count_rc_closure, 0);
  }
  g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  g1_policy()->record_pop_compute_rc_end();

  // Now evacuate popular objects.
  g1_policy()->record_pop_evac_start();
  EvacPopObjClosure evac_pop_obj_cl(this);
  popular_region->object_iterate(&evac_pop_obj_cl);
  *max_rc = evac_pop_obj_cl.max_rc();

  // Make sure the last "top" value of the current popular region is copied
  // as the "next_top_at_mark_start", so that objects made popular during
  // markings aren't automatically considered live.
  HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index);
  cur_pop_region->note_end_of_copying();

  if (rc_region != NULL) {
    free_region(rc_region);
  } else {
    FREE_C_HEAP_ARRAY(HeapWord, rc_region_bot);
  }
  g1_policy()->record_pop_evac_end();

  return evac_pop_obj_cl.pop_objs() > 0;
}
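
// Note: the overall shape of a popularity pause is therefore:
//   1. grab (or C-heap allocate) a region-sized scratch area for the
//      reference counts;
//   2. count references into the candidate region by iterating the oops
//      pointing into the (single-region) collection set, in parallel via
//      G1ParCountRCTask when ParallelGCThreads > 0;
//   3. walk the region's objects and evacuate those at or above
//      G1ObjPopLimit (EvacPopObjClosure);
//   4. release the scratch area and report whether anything moved, so that
//      popularity_pause_preamble() can decide whether to double G1RSPopLimit
//      and cancel the rest of the pause.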

class CountPopObjInfoClosure: public HeapRegionClosure {
  size_t _objs;
  size_t _bytes;

  class CountObjClosure: public ObjectClosure {
    int _n;
  public:
    CountObjClosure() : _n(0) {}
    void do_object(oop obj) { _n++; }
    size_t n() { return _n; }
  };

public:
  CountPopObjInfoClosure() : _objs(0), _bytes(0) {}
  bool doHeapRegion(HeapRegion* r) {
    _bytes += r->used();
    CountObjClosure blk;
    r->object_iterate(&blk);
    _objs += blk.n();
    return false;
  }
  size_t objs() { return _objs; }
  size_t bytes() { return _bytes; }
};


void G1CollectedHeap::print_popularity_summary_info() const {
  CountPopObjInfoClosure blk;
  for (int i = 0; i <= _cur_pop_hr_index; i++) {
    blk.doHeapRegion(_hrs->at(i));
  }
  gclog_or_tty->print_cr("\nPopular objects: %d objs, %d bytes.",
                         blk.objs(), blk.bytes());
  gclog_or_tty->print_cr("  RC at copy = [avg = %5.2f, max = %5.2f, sd = %5.2f].",
                         _pop_obj_rc_at_copy.avg(),
                         _pop_obj_rc_at_copy.maximum(),
                         _pop_obj_rc_at_copy.sd());
}

void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  _refine_cte_cl->set_concurrent(concurrent);
}

#ifndef PRODUCT

class PrintHeapRegionClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print("Region: "PTR_FORMAT":", r);
    if (r != NULL) {
      if (r->is_on_free_list())
        gclog_or_tty->print("Free ");
      if (r->is_young())
        gclog_or_tty->print("Young ");
      if (r->isHumongous())
        gclog_or_tty->print("Is Humongous ");
      r->print();
    }
    return false;
  }
};

class SortHeapRegionClosure : public HeapRegionClosure {
  size_t young_regions, free_regions, unclean_regions;
  size_t hum_regions, count;
  size_t unaccounted, cur_unclean, cur_alloc;
  size_t total_free;
  HeapRegion* cur;
public:
  SortHeapRegionClosure(HeapRegion* _cur) : cur(_cur), young_regions(0),
                                            free_regions(0), unclean_regions(0),
                                            hum_regions(0),
                                            count(0), unaccounted(0),
                                            cur_alloc(0), total_free(0)
  {}
  bool doHeapRegion(HeapRegion* r) {
    count++;
    if (r->is_on_free_list()) free_regions++;
    else if (r->is_on_unclean_list()) unclean_regions++;
    else if (r->isHumongous()) hum_regions++;
    else if (r->is_young()) young_regions++;
    else if (r == cur) cur_alloc++;
    else unaccounted++;
    return false;
  }
  void print() {
    total_free = free_regions + unclean_regions;
    gclog_or_tty->print("%d regions\n", count);
    gclog_or_tty->print("%d free: free_list = %d unclean = %d\n",
                        total_free, free_regions, unclean_regions);
    gclog_or_tty->print("%d humongous %d young\n",
                        hum_regions, young_regions);
    gclog_or_tty->print("%d cur_alloc\n", cur_alloc);
    gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted);
  }
};

void G1CollectedHeap::print_region_counts() {
  SortHeapRegionClosure sc(_cur_alloc_region);
  PrintHeapRegionClosure cl;
  heap_region_iterate(&cl);
  heap_region_iterate(&sc);
  sc.print();
  print_region_accounting_info();
}

bool G1CollectedHeap::regions_accounted_for() {
  // TODO: regions accounting for young/survivor/tenured
  return true;
}

bool G1CollectedHeap::print_region_accounting_info() {
  gclog_or_tty->print_cr("P regions: %d.", G1NumPopularRegions);
  gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
                         free_regions(),
                         count_free_regions(), count_free_regions_list(),
                         _free_region_list_size, _unclean_region_list.sz());
  gclog_or_tty->print_cr("cur_alloc: %d.",
                         (_cur_alloc_region == NULL ? 0 : 1));
  gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);

  // TODO: check regions accounting for young/survivor/tenured
  return true;
}

bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  HeapRegion* hr = heap_region_containing(p);
  if (hr == NULL) {
    return is_in_permanent(p);
  } else {
    return hr->is_in(p);
  }
}
#endif // PRODUCT

void G1CollectedHeap::g1_unimplemented() {
  // Unimplemented();
}


// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***