comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 52b4284cb496 9fa3bf3043a2
children d3cec14f33f3
compared revisions: 20184:84105dcdb05b vs 20804:7848fc12602b
25 #if !defined(__clang_major__) && defined(__GNUC__) 25 #if !defined(__clang_major__) && defined(__GNUC__)
26 #define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess. 26 #define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
27 #endif 27 #endif
28 28
29 #include "precompiled.hpp" 29 #include "precompiled.hpp"
30 #include "classfile/metadataOnStackMark.hpp"
30 #include "code/codeCache.hpp" 31 #include "code/codeCache.hpp"
31 #include "code/icBuffer.hpp" 32 #include "code/icBuffer.hpp"
32 #include "gc_implementation/g1/bufferingOopClosure.hpp" 33 #include "gc_implementation/g1/bufferingOopClosure.hpp"
33 #include "gc_implementation/g1/concurrentG1Refine.hpp" 34 #include "gc_implementation/g1/concurrentG1Refine.hpp"
34 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" 35 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
40 #include "gc_implementation/g1/g1EvacFailure.hpp" 41 #include "gc_implementation/g1/g1EvacFailure.hpp"
41 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" 42 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
42 #include "gc_implementation/g1/g1Log.hpp" 43 #include "gc_implementation/g1/g1Log.hpp"
43 #include "gc_implementation/g1/g1MarkSweep.hpp" 44 #include "gc_implementation/g1/g1MarkSweep.hpp"
44 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 45 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
46 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
47 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
45 #include "gc_implementation/g1/g1RemSet.inline.hpp" 48 #include "gc_implementation/g1/g1RemSet.inline.hpp"
46 #include "gc_implementation/g1/g1StringDedup.hpp" 49 #include "gc_implementation/g1/g1StringDedup.hpp"
47 #include "gc_implementation/g1/g1YCTypes.hpp" 50 #include "gc_implementation/g1/g1YCTypes.hpp"
48 #include "gc_implementation/g1/heapRegion.inline.hpp" 51 #include "gc_implementation/g1/heapRegion.inline.hpp"
49 #include "gc_implementation/g1/heapRegionRemSet.hpp" 52 #include "gc_implementation/g1/heapRegionRemSet.hpp"
50 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" 53 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
51 #include "gc_implementation/g1/vm_operations_g1.hpp" 54 #include "gc_implementation/g1/vm_operations_g1.hpp"
52 #include "gc_implementation/shared/gcHeapSummary.hpp" 55 #include "gc_implementation/shared/gcHeapSummary.hpp"
53 #include "gc_implementation/shared/gcTimer.hpp" 56 #include "gc_implementation/shared/gcTimer.hpp"
54 #include "gc_implementation/shared/gcTrace.hpp" 57 #include "gc_implementation/shared/gcTrace.hpp"
55 #include "gc_implementation/shared/gcTraceTime.hpp" 58 #include "gc_implementation/shared/gcTraceTime.hpp"
56 #include "gc_implementation/shared/isGCActiveMark.hpp" 59 #include "gc_implementation/shared/isGCActiveMark.hpp"
60 #include "memory/allocation.hpp"
57 #include "memory/gcLocker.inline.hpp" 61 #include "memory/gcLocker.inline.hpp"
58 #include "memory/generationSpec.hpp" 62 #include "memory/generationSpec.hpp"
59 #include "memory/iterator.hpp" 63 #include "memory/iterator.hpp"
60 #include "memory/referenceProcessor.hpp" 64 #include "memory/referenceProcessor.hpp"
61 #include "oops/oop.inline.hpp" 65 #include "oops/oop.inline.hpp"
62 #include "oops/oop.pcgc.inline.hpp" 66 #include "oops/oop.pcgc.inline.hpp"
67 #include "runtime/orderAccess.inline.hpp"
63 #include "runtime/vmThread.hpp" 68 #include "runtime/vmThread.hpp"
64 #include "utilities/ticks.hpp"
65 69
66 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; 70 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
67 71
68 // turn it on so that the contents of the young list (scan-only / 72 // turn it on so that the contents of the young list (scan-only /
69 // to-be-collected) are printed at "strategic" points before / during 73 // to-be-collected) are printed at "strategic" points before / during
84 // Notes on implementation of parallelism in different tasks. 88 // Notes on implementation of parallelism in different tasks.
85 // 89 //
86 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism. 90 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
87 // The number of GC workers is passed to heap_region_par_iterate_chunked(). 91 // The number of GC workers is passed to heap_region_par_iterate_chunked().
88 // It does use run_task() which sets _n_workers in the task. 92 // It does use run_task() which sets _n_workers in the task.
89 // G1ParTask executes g1_process_strong_roots() -> 93 // G1ParTask executes g1_process_roots() ->
90 // SharedHeap::process_strong_roots() which calls eventually to 94 // SharedHeap::process_roots() which calls eventually to
91 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses 95 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
92 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also 96 // SequentialSubTasksDone. SharedHeap::process_roots() also
93 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). 97 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
94 // 98 //
95 99
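The notes above describe parallel heap-region iteration in which each GC worker claims regions so that every region is visited exactly once. As a standalone illustration of that claiming scheme only (a minimal sketch in plain C++ using std::thread and an atomic counter; none of the names below are HotSpot APIs):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const unsigned num_regions = 64;
  const unsigned num_workers = 4;
  std::atomic<unsigned> next_region{0};
  std::vector<unsigned> processed_by(num_regions, 0);

  // Each worker repeatedly claims the next unclaimed region index.
  auto worker = [&](unsigned worker_id) {
    for (;;) {
      unsigned idx = next_region.fetch_add(1);  // atomic claim
      if (idx >= num_regions) break;            // all regions handed out
      processed_by[idx] = worker_id;            // "process" the region
    }
  };

  std::vector<std::thread> threads;
  for (unsigned w = 0; w < num_workers; ++w) threads.emplace_back(worker, w);
  for (std::thread& t : threads) t.join();

  for (unsigned i = 0; i < num_regions; ++i)
    std::printf("region %2u claimed by worker %u\n", i, processed_by[i]);
  return 0;
}

In the VM the same idea is expressed with per-region claim values and SubTasksDone / SequentialSubTasksDone counters rather than a single shared index.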
96 // Local to this file. 100 // Local to this file.
97 101
98 class RefineCardTableEntryClosure: public CardTableEntryClosure { 102 class RefineCardTableEntryClosure: public CardTableEntryClosure {
99 SuspendibleThreadSet* _sts;
100 G1RemSet* _g1rs;
101 ConcurrentG1Refine* _cg1r;
102 bool _concurrent; 103 bool _concurrent;
103 public: 104 public:
104 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, 105 RefineCardTableEntryClosure() : _concurrent(true) { }
105 G1RemSet* g1rs, 106
106 ConcurrentG1Refine* cg1r) :
107 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
108 {}
109 bool do_card_ptr(jbyte* card_ptr, uint worker_i) { 107 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
110 bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false); 108 bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
111 // This path is executed by the concurrent refine or mutator threads, 109 // This path is executed by the concurrent refine or mutator threads,
112 // concurrently, and so we do not care if card_ptr contains references 110 // concurrently, and so we do not care if card_ptr contains references
113 // that point into the collection set. 111 // that point into the collection set.
114 assert(!oops_into_cset, "should be"); 112 assert(!oops_into_cset, "should be");
115 113
116 if (_concurrent && _sts->should_yield()) { 114 if (_concurrent && SuspendibleThreadSet::should_yield()) {
117 // Caller will actually yield. 115 // Caller will actually yield.
118 return false; 116 return false;
119 } 117 }
120 // Otherwise, we finished successfully; return true. 118 // Otherwise, we finished successfully; return true.
121 return true; 119 return true;
122 } 120 }
121
123 void set_concurrent(bool b) { _concurrent = b; } 122 void set_concurrent(bool b) { _concurrent = b; }
124 }; 123 };
125 124
126 125
127 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { 126 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
128 int _calls; 127 size_t _num_processed;
129 G1CollectedHeap* _g1h;
130 CardTableModRefBS* _ctbs; 128 CardTableModRefBS* _ctbs;
131 int _histo[256]; 129 int _histo[256];
132 public: 130
131 public:
133 ClearLoggedCardTableEntryClosure() : 132 ClearLoggedCardTableEntryClosure() :
134 _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) 133 _num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
135 { 134 {
136 for (int i = 0; i < 256; i++) _histo[i] = 0; 135 for (int i = 0; i < 256; i++) _histo[i] = 0;
137 } 136 }
137
138 bool do_card_ptr(jbyte* card_ptr, uint worker_i) { 138 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
139 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 139 unsigned char* ujb = (unsigned char*)card_ptr;
140 _calls++; 140 int ind = (int)(*ujb);
141 unsigned char* ujb = (unsigned char*)card_ptr; 141 _histo[ind]++;
142 int ind = (int)(*ujb); 142
143 _histo[ind]++; 143 *card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
144 *card_ptr = -1; 144 _num_processed++;
145 } 145
146 return true; 146 return true;
147 } 147 }
148 int calls() { return _calls; } 148
149 size_t num_processed() { return _num_processed; }
150
149 void print_histo() { 151 void print_histo() {
150 gclog_or_tty->print_cr("Card table value histogram:"); 152 gclog_or_tty->print_cr("Card table value histogram:");
151 for (int i = 0; i < 256; i++) { 153 for (int i = 0; i < 256; i++) {
152 if (_histo[i] != 0) { 154 if (_histo[i] != 0) {
153 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); 155 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
154 } 156 }
155 } 157 }
156 } 158 }
157 }; 159 };
158 160
159 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { 161 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
160 int _calls; 162 private:
161 G1CollectedHeap* _g1h; 163 size_t _num_processed;
162 CardTableModRefBS* _ctbs; 164
163 public: 165 public:
164 RedirtyLoggedCardTableEntryClosure() : 166 RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
165 _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
166 167
167 bool do_card_ptr(jbyte* card_ptr, uint worker_i) { 168 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
168 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 169 *card_ptr = CardTableModRefBS::dirty_card_val();
169 _calls++; 170 _num_processed++;
170 *card_ptr = 0;
171 }
172 return true; 171 return true;
173 } 172 }
174 int calls() { return _calls; } 173
174 size_t num_processed() const { return _num_processed; }
175 }; 175 };
176 176
177 YoungList::YoungList(G1CollectedHeap* g1h) : 177 YoungList::YoungList(G1CollectedHeap* g1h) :
178 _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0), 178 _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
179 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) { 179 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
206 void YoungList::empty_list(HeapRegion* list) { 206 void YoungList::empty_list(HeapRegion* list) {
207 while (list != NULL) { 207 while (list != NULL) {
208 HeapRegion* next = list->get_next_young_region(); 208 HeapRegion* next = list->get_next_young_region();
209 list->set_next_young_region(NULL); 209 list->set_next_young_region(NULL);
210 list->uninstall_surv_rate_group(); 210 list->uninstall_surv_rate_group();
211 list->set_not_young(); 211 // This is called before a Full GC and all the non-empty /
212 // non-humongous regions at the end of the Full GC will end up as
213 // old anyway.
214 list->set_old();
212 list = next; 215 list = next;
213 } 216 }
214 } 217 }
215 218
216 void YoungList::empty_list() { 219 void YoungList::empty_list() {
365 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); 368 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
366 HeapRegion *curr = lists[list]; 369 HeapRegion *curr = lists[list];
367 if (curr == NULL) 370 if (curr == NULL)
368 gclog_or_tty->print_cr(" empty"); 371 gclog_or_tty->print_cr(" empty");
369 while (curr != NULL) { 372 while (curr != NULL) {
370 gclog_or_tty->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d", 373 gclog_or_tty->print_cr(" "HR_FORMAT", P: "PTR_FORMAT ", N: "PTR_FORMAT", age: %4d",
371 HR_FORMAT_PARAMS(curr), 374 HR_FORMAT_PARAMS(curr),
372 curr->prev_top_at_mark_start(), 375 curr->prev_top_at_mark_start(),
373 curr->next_top_at_mark_start(), 376 curr->next_top_at_mark_start(),
374 curr->age_in_surv_rate_group_cond()); 377 curr->age_in_surv_rate_group_cond());
375 curr = curr->get_next_young_region(); 378 curr = curr->get_next_young_region();
376 } 379 }
377 } 380 }
378 381
379 gclog_or_tty->cr(); 382 gclog_or_tty->cr();
383 }
384
385 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
386 OtherRegionsTable::invalidate(start_idx, num_regions);
387 }
388
389 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
390 // The from card cache is not the memory that is actually committed. So we cannot
391 // take advantage of the zero_filled parameter.
392 reset_from_card_cache(start_idx, num_regions);
380 } 393 }
381 394
382 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) 395 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
383 { 396 {
384 // Claim the right to put the region on the dirty cards region list 397 // Claim the right to put the region on the dirty cards region list
442 // be move during a partial collection. Though it can be 455 // be move during a partial collection. Though it can be
443 // inaccurate, it is sufficient for G1 because the conservative 456 // inaccurate, it is sufficient for G1 because the conservative
444 // implementation of is_scavengable() for G1 will indicate that 457 // implementation of is_scavengable() for G1 will indicate that
445 // all nmethods must be scanned during a partial collection. 458 // all nmethods must be scanned during a partial collection.
446 bool G1CollectedHeap::is_in_partial_collection(const void* p) { 459 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
447 HeapRegion* hr = heap_region_containing(p); 460 if (p == NULL) {
448 return hr != NULL && hr->in_collection_set(); 461 return false;
462 }
463 return heap_region_containing(p)->in_collection_set();
449 } 464 }
450 #endif 465 #endif
451 466
452 // Returns true if the reference points to an object that 467 // Returns true if the reference points to an object that
453 // can move in an incremental collection. 468 // can move in an incremental collection.
454 bool G1CollectedHeap::is_scavengable(const void* p) { 469 bool G1CollectedHeap::is_scavengable(const void* p) {
455 G1CollectedHeap* g1h = G1CollectedHeap::heap();
456 G1CollectorPolicy* g1p = g1h->g1_policy();
457 HeapRegion* hr = heap_region_containing(p); 470 HeapRegion* hr = heap_region_containing(p);
458 if (hr == NULL) { 471 return !hr->isHumongous();
459 // null
460 assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
461 return false;
462 } else {
463 return !hr->isHumongous();
464 }
465 } 472 }
466 473
467 void G1CollectedHeap::check_ct_logs_at_safepoint() { 474 void G1CollectedHeap::check_ct_logs_at_safepoint() {
468 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 475 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
469 CardTableModRefBS* ct_bs = g1_barrier_set(); 476 CardTableModRefBS* ct_bs = g1_barrier_set();
473 ct_bs->mod_card_iterate(&count1); 480 ct_bs->mod_card_iterate(&count1);
474 int orig_count = count1.n(); 481 int orig_count = count1.n();
475 482
476 // First clear the logged cards. 483 // First clear the logged cards.
477 ClearLoggedCardTableEntryClosure clear; 484 ClearLoggedCardTableEntryClosure clear;
478 dcqs.set_closure(&clear); 485 dcqs.apply_closure_to_all_completed_buffers(&clear);
479 dcqs.apply_closure_to_all_completed_buffers(); 486 dcqs.iterate_closure_all_threads(&clear, false);
480 dcqs.iterate_closure_all_threads(false);
481 clear.print_histo(); 487 clear.print_histo();
482 488
483 // Now ensure that there's no dirty cards. 489 // Now ensure that there's no dirty cards.
484 CountNonCleanMemRegionClosure count2(this); 490 CountNonCleanMemRegionClosure count2(this);
485 ct_bs->mod_card_iterate(&count2); 491 ct_bs->mod_card_iterate(&count2);
488 count2.n(), orig_count); 494 count2.n(), orig_count);
489 } 495 }
490 guarantee(count2.n() == 0, "Card table should be clean."); 496 guarantee(count2.n() == 0, "Card table should be clean.");
491 497
492 RedirtyLoggedCardTableEntryClosure redirty; 498 RedirtyLoggedCardTableEntryClosure redirty;
493 JavaThread::dirty_card_queue_set().set_closure(&redirty); 499 dcqs.apply_closure_to_all_completed_buffers(&redirty);
494 dcqs.apply_closure_to_all_completed_buffers(); 500 dcqs.iterate_closure_all_threads(&redirty, false);
495 dcqs.iterate_closure_all_threads(false);
496 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", 501 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
497 clear.calls(), orig_count); 502 clear.num_processed(), orig_count);
498 guarantee(redirty.calls() == clear.calls(), 503 guarantee(redirty.num_processed() == clear.num_processed(),
499 "Or else mechanism is broken."); 504 err_msg("Redirtied "SIZE_FORMAT" cards, bug cleared "SIZE_FORMAT,
505 redirty.num_processed(), clear.num_processed()));
500 506
501 CountNonCleanMemRegionClosure count3(this); 507 CountNonCleanMemRegionClosure count3(this);
502 ct_bs->mod_card_iterate(&count3); 508 ct_bs->mod_card_iterate(&count3);
503 if (count3.n() != orig_count) { 509 if (count3.n() != orig_count) {
504 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", 510 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
505 orig_count, count3.n()); 511 orig_count, count3.n());
506 guarantee(count3.n() >= orig_count, "Should have restored them all."); 512 guarantee(count3.n() >= orig_count, "Should have restored them all.");
507 } 513 }
508
509 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
510 } 514 }
511 515
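check_ct_logs_at_safepoint() above depends on a simple invariant: clearing every logged card and then redirtying from the same logs must touch the same number of cards, leaving the table dirty exactly where it was before. A minimal standalone sketch of that invariant (plain C++; the 0xff clean and 0x00 dirty byte values mirror the -1 and 0 written by the old closures but are otherwise just assumptions for this example):

#include <cassert>
#include <cstdio>
#include <vector>

int main() {
  const unsigned char kClean = 0xff;   // assumed clean_card_val()
  const unsigned char kDirty = 0x00;   // assumed dirty_card_val()

  std::vector<unsigned char> card_table(16, kClean);
  std::vector<size_t> dirty_log = { 2, 5, 5, 11 };   // logged card indices, duplicates allowed
  for (size_t idx : dirty_log) card_table[idx] = kDirty;

  size_t cleared = 0, redirtied = 0;

  // First pass: clear every logged card, counting log entries processed.
  for (size_t idx : dirty_log) { card_table[idx] = kClean; cleared++; }
  for (unsigned char v : card_table) assert(v == kClean);   // table must now be clean

  // Second pass: redirty from the same log and check the counts agree.
  for (size_t idx : dirty_log) { card_table[idx] = kDirty; redirtied++; }
  assert(redirtied == cleared);

  std::printf("cleared=%zu redirtied=%zu\n", cleared, redirtied);
  return 0;
}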
512 // Private class members. 516 // Private class members.
513 517
514 G1CollectedHeap* G1CollectedHeap::_g1h; 518 G1CollectedHeap* G1CollectedHeap::_g1h;
528 // It looks as if there are free regions available on the 532 // It looks as if there are free regions available on the
529 // secondary_free_list. Let's move them to the free_list and try 533 // secondary_free_list. Let's move them to the free_list and try
530 // again to allocate from it. 534 // again to allocate from it.
531 append_secondary_free_list(); 535 append_secondary_free_list();
532 536
533 assert(!_free_list.is_empty(), "if the secondary_free_list was not " 537 assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
534 "empty we should have moved at least one entry to the free_list"); 538 "empty we should have moved at least one entry to the free_list");
535 HeapRegion* res = _free_list.remove_region(is_old); 539 HeapRegion* res = _hrm.allocate_free_region(is_old);
536 if (G1ConcRegionFreeingVerbose) { 540 if (G1ConcRegionFreeingVerbose) {
537 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 541 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
538 "allocated "HR_FORMAT" from secondary_free_list", 542 "allocated "HR_FORMAT" from secondary_free_list",
539 HR_FORMAT_PARAMS(res)); 543 HR_FORMAT_PARAMS(res));
540 } 544 }
571 return res; 575 return res;
572 } 576 }
573 } 577 }
574 } 578 }
575 579
576 res = _free_list.remove_region(is_old); 580 res = _hrm.allocate_free_region(is_old);
577 581
578 if (res == NULL) { 582 if (res == NULL) {
579 if (G1ConcRegionFreeingVerbose) { 583 if (G1ConcRegionFreeingVerbose) {
580 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 584 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
581 "res == NULL, trying the secondary_free_list"); 585 "res == NULL, trying the secondary_free_list");
596 word_size * HeapWordSize); 600 word_size * HeapWordSize);
597 if (expand(word_size * HeapWordSize)) { 601 if (expand(word_size * HeapWordSize)) {
598 // Given that expand() succeeded in expanding the heap, and we 602 // Given that expand() succeeded in expanding the heap, and we
599 // always expand the heap by an amount aligned to the heap 603 // always expand the heap by an amount aligned to the heap
600 // region size, the free list should in theory not be empty. 604 // region size, the free list should in theory not be empty.
601 // In either case remove_region() will check for NULL. 605 // In either case allocate_free_region() will check for NULL.
602 res = _free_list.remove_region(is_old); 606 res = _hrm.allocate_free_region(is_old);
603 } else { 607 } else {
604 _expand_heap_after_alloc_failure = false; 608 _expand_heap_after_alloc_failure = false;
605 } 609 }
606 } 610 }
607 return res; 611 return res;
608 }
609
610 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
611 size_t word_size) {
612 assert(isHumongous(word_size), "word_size should be humongous");
613 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
614
615 uint first = G1_NULL_HRS_INDEX;
616 if (num_regions == 1) {
617 // Only one region to allocate, no need to go through the slower
618 // path. The caller will attempt the expansion if this fails, so
619 // let's not try to expand here too.
620 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
621 if (hr != NULL) {
622 first = hr->hrs_index();
623 } else {
624 first = G1_NULL_HRS_INDEX;
625 }
626 } else {
627 // We can't allocate humongous regions while cleanupComplete() is
628 // running, since some of the regions we find to be empty might not
629 // yet be added to the free list and it is not straightforward to
630 // know which list they are on so that we can remove them. Note
631 // that we only need to do this if we need to allocate more than
632 // one region to satisfy the current humongous allocation
633 // request. If we are only allocating one region we use the common
634 // region allocation code (see above).
635 wait_while_free_regions_coming();
636 append_secondary_free_list_if_not_empty_with_lock();
637
638 if (free_regions() >= num_regions) {
639 first = _hrs.find_contiguous(num_regions);
640 if (first != G1_NULL_HRS_INDEX) {
641 for (uint i = first; i < first + num_regions; ++i) {
642 HeapRegion* hr = region_at(i);
643 assert(hr->is_empty(), "sanity");
644 assert(is_on_master_free_list(hr), "sanity");
645 hr->set_pending_removal(true);
646 }
647 _free_list.remove_all_pending(num_regions);
648 }
649 }
650 }
651 return first;
652 } 612 }
653 613
654 HeapWord* 614 HeapWord*
655 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first, 615 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
656 uint num_regions, 616 uint num_regions,
657 size_t word_size) { 617 size_t word_size,
658 assert(first != G1_NULL_HRS_INDEX, "pre-condition"); 618 AllocationContext_t context) {
619 assert(first != G1_NO_HRM_INDEX, "pre-condition");
659 assert(isHumongous(word_size), "word_size should be humongous"); 620 assert(isHumongous(word_size), "word_size should be humongous");
660 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); 621 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
661 622
662 // Index of last region in the series + 1. 623 // Index of last region in the series + 1.
663 uint last = first + num_regions; 624 uint last = first + num_regions;
704 // We will set up the first region as "starts humongous". This 665 // We will set up the first region as "starts humongous". This
705 // will also update the BOT covering all the regions to reflect 666 // will also update the BOT covering all the regions to reflect
706 // that there is a single object that starts at the bottom of the 667 // that there is a single object that starts at the bottom of the
707 // first region. 668 // first region.
708 first_hr->set_startsHumongous(new_top, new_end); 669 first_hr->set_startsHumongous(new_top, new_end);
709 670 first_hr->set_allocation_context(context);
710 // Then, if there are any, we will set up the "continues 671 // Then, if there are any, we will set up the "continues
711 // humongous" regions. 672 // humongous" regions.
712 HeapRegion* hr = NULL; 673 HeapRegion* hr = NULL;
713 for (uint i = first + 1; i < last; ++i) { 674 for (uint i = first + 1; i < last; ++i) {
714 hr = region_at(i); 675 hr = region_at(i);
715 hr->set_continuesHumongous(first_hr); 676 hr->set_continuesHumongous(first_hr);
677 hr->set_allocation_context(context);
716 } 678 }
717 // If we have "continues humongous" regions (hr != NULL), then the 679 // If we have "continues humongous" regions (hr != NULL), then the
718 // end of the last one should match new_end. 680 // end of the last one should match new_end.
719 assert(hr == NULL || hr->end() == new_end, "sanity"); 681 assert(hr == NULL || hr->end() == new_end, "sanity");
720 682
774 // If we have continues humongous regions (hr != NULL), then the 736 // If we have continues humongous regions (hr != NULL), then the
775 // end of the last one should match new_end and its top should 737 // end of the last one should match new_end and its top should
776 // match new_top. 738 // match new_top.
777 assert(hr == NULL || 739 assert(hr == NULL ||
778 (hr->end() == new_end && hr->top() == new_top), "sanity"); 740 (hr->end() == new_end && hr->top() == new_top), "sanity");
741 check_bitmaps("Humongous Region Allocation", first_hr);
779 742
780 assert(first_hr->used() == word_size * HeapWordSize, "invariant"); 743 assert(first_hr->used() == word_size * HeapWordSize, "invariant");
781 _summary_bytes_used += first_hr->used(); 744 _allocator->increase_used(first_hr->used());
782 _humongous_set.add(first_hr); 745 _humongous_set.add(first_hr);
783 746
784 return new_obj; 747 return new_obj;
785 } 748 }
786 749
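humongous_obj_allocate_initialize_regions() above lays a humongous object out as one "starts humongous" region followed by zero or more "continues humongous" regions. A standalone worked example of the sizing arithmetic (a sketch that assumes a 1 MB region, i.e. GrainWords = 131072 on a 64-bit VM, and the usual half-region humongous threshold; the numbers are illustrative, not read from the VM):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t GrainWords = 131072;           // assumed words per heap region (1 MB / 8)
  const size_t threshold  = GrainWords / 2;   // assumed humongous threshold

  const size_t word_sizes[] = { 70000, 131072, 400000 };
  for (size_t word_size : word_sizes) {
    bool   humongous = word_size >= threshold;
    size_t regions   = (word_size + GrainWords - 1) / GrainWords;   // round up
    std::printf("word_size=%zu humongous=%d regions=%zu "
                "(1 starts-humongous + %zu continues-humongous)\n",
                word_size, (int)humongous, regions,
                humongous ? regions - 1 : (size_t)0);
  }
  return 0;
}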
787 // If could fit into free regions w/o expansion, try. 750 // If could fit into free regions w/o expansion, try.
788 // Otherwise, if can expand, do so. 751 // Otherwise, if can expand, do so.
789 // Otherwise, if using ex regions might help, try with ex given back. 752 // Otherwise, if using ex regions might help, try with ex given back.
790 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { 753 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
791 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 754 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
792 755
793 verify_region_sets_optional(); 756 verify_region_sets_optional();
794 757
795 size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords); 758 uint first = G1_NO_HRM_INDEX;
796 uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords); 759 uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
797 uint x_num = expansion_regions(); 760
798 uint fs = _hrs.free_suffix(); 761 if (obj_regions == 1) {
799 uint first = humongous_obj_allocate_find_first(num_regions, word_size); 762 // Only one region to allocate, try to use a fast path by directly allocating
800 if (first == G1_NULL_HRS_INDEX) { 763 // from the free lists. Do not try to expand here, we will potentially do that
801 // The only thing we can do now is attempt expansion. 764 // later.
802 if (fs + x_num >= num_regions) { 765 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
803 // If the number of regions we're trying to allocate for this 766 if (hr != NULL) {
804 // object is at most the number of regions in the free suffix, 767 first = hr->hrm_index();
805 // then the call to humongous_obj_allocate_find_first() above 768 }
806 // should have succeeded and we wouldn't be here. 769 } else {
807 // 770 // We can't allocate humongous regions spanning more than one region while
808 // We should only be trying to expand when the free suffix is 771 // cleanupComplete() is running, since some of the regions we find to be
809 // not sufficient for the object _and_ we have some expansion 772 // empty might not yet be added to the free list. It is not straightforward
810 // room available. 773 // to know in which list they are on so that we can remove them. We only
811 assert(num_regions > fs, "earlier allocation should have succeeded"); 774 // need to do this if we need to allocate more than one region to satisfy the
812 775 // current humongous allocation request. If we are only allocating one region
776 // we use the one-region region allocation code (see above), that already
777 // potentially waits for regions from the secondary free list.
778 wait_while_free_regions_coming();
779 append_secondary_free_list_if_not_empty_with_lock();
780
781 // Policy: Try only empty regions (i.e. already committed first). Maybe we
782 // are lucky enough to find some.
783 first = _hrm.find_contiguous_only_empty(obj_regions);
784 if (first != G1_NO_HRM_INDEX) {
785 _hrm.allocate_free_regions_starting_at(first, obj_regions);
786 }
787 }
788
789 if (first == G1_NO_HRM_INDEX) {
790 // Policy: We could not find enough regions for the humongous object in the
791 // free list. Look through the heap to find a mix of free and uncommitted regions.
792 // If so, try expansion.
793 first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
794 if (first != G1_NO_HRM_INDEX) {
795 // We found something. Make sure these regions are committed, i.e. expand
796 // the heap. Alternatively we could do a defragmentation GC.
813 ergo_verbose1(ErgoHeapSizing, 797 ergo_verbose1(ErgoHeapSizing,
814 "attempt heap expansion", 798 "attempt heap expansion",
815 ergo_format_reason("humongous allocation request failed") 799 ergo_format_reason("humongous allocation request failed")
816 ergo_format_byte("allocation request"), 800 ergo_format_byte("allocation request"),
817 word_size * HeapWordSize); 801 word_size * HeapWordSize);
818 if (expand((num_regions - fs) * HeapRegion::GrainBytes)) { 802
819 // Even though the heap was expanded, it might not have 803 _hrm.expand_at(first, obj_regions);
820 // reached the desired size. So, we cannot assume that the 804 g1_policy()->record_new_heap_size(num_regions());
821 // allocation will succeed. 805
822 first = humongous_obj_allocate_find_first(num_regions, word_size); 806 #ifdef ASSERT
807 for (uint i = first; i < first + obj_regions; ++i) {
808 HeapRegion* hr = region_at(i);
809 assert(hr->is_free(), "sanity");
810 assert(hr->is_empty(), "sanity");
811 assert(is_on_master_free_list(hr), "sanity");
823 } 812 }
813 #endif
814 _hrm.allocate_free_regions_starting_at(first, obj_regions);
815 } else {
816 // Policy: Potentially trigger a defragmentation GC.
824 } 817 }
825 } 818 }
826 819
827 HeapWord* result = NULL; 820 HeapWord* result = NULL;
828 if (first != G1_NULL_HRS_INDEX) { 821 if (first != G1_NO_HRM_INDEX) {
829 result = 822 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
830 humongous_obj_allocate_initialize_regions(first, num_regions, word_size); 823 word_size, context);
831 assert(result != NULL, "it should always return a valid result"); 824 assert(result != NULL, "it should always return a valid result");
832 825
833 // A successful humongous object allocation changes the used space 826 // A successful humongous object allocation changes the used space
834 // information of the old generation so we need to recalculate the 827 // information of the old generation so we need to recalculate the
835 // sizes and update the jstat counters here. 828 // sizes and update the jstat counters here.
869 return result; 862 return result;
870 } 863 }
871 864
872 // Create the garbage collection operation... 865 // Create the garbage collection operation...
873 VM_G1CollectForAllocation op(gc_count_before, word_size); 866 VM_G1CollectForAllocation op(gc_count_before, word_size);
867 op.set_allocation_context(AllocationContext::current());
868
874 // ...and get the VM thread to execute it. 869 // ...and get the VM thread to execute it.
875 VMThread::execute(&op); 870 VMThread::execute(&op);
876 871
877 if (op.prologue_succeeded() && op.pause_succeeded()) { 872 if (op.prologue_succeeded() && op.pause_succeeded()) {
878 // If the operation was successful we'll return the result even 873 // If the operation was successful we'll return the result even
904 ShouldNotReachHere(); 899 ShouldNotReachHere();
905 return NULL; 900 return NULL;
906 } 901 }
907 902
908 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, 903 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
909 unsigned int *gc_count_before_ret, 904 AllocationContext_t context,
910 int* gclocker_retry_count_ret) { 905 unsigned int *gc_count_before_ret,
906 int* gclocker_retry_count_ret) {
911 // Make sure you read the note in attempt_allocation_humongous(). 907 // Make sure you read the note in attempt_allocation_humongous().
912 908
913 assert_heap_not_locked_and_not_at_safepoint(); 909 assert_heap_not_locked_and_not_at_safepoint();
914 assert(!isHumongous(word_size), "attempt_allocation_slow() should not " 910 assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
915 "be called for humongous allocation requests"); 911 "be called for humongous allocation requests");
926 bool should_try_gc; 922 bool should_try_gc;
927 unsigned int gc_count_before; 923 unsigned int gc_count_before;
928 924
929 { 925 {
930 MutexLockerEx x(Heap_lock); 926 MutexLockerEx x(Heap_lock);
931 927 result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
932 result = _mutator_alloc_region.attempt_allocation_locked(word_size, 928 false /* bot_updates */);
933 false /* bot_updates */);
934 if (result != NULL) { 929 if (result != NULL) {
935 return result; 930 return result;
936 } 931 }
937 932
938 // If we reach here, attempt_allocation_locked() above failed to 933 // If we reach here, attempt_allocation_locked() above failed to
939 // allocate a new region. So the mutator alloc region should be NULL. 934 // allocate a new region. So the mutator alloc region should be NULL.
940 assert(_mutator_alloc_region.get() == NULL, "only way to get here"); 935 assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
941 936
942 if (GC_locker::is_active_and_needs_gc()) { 937 if (GC_locker::is_active_and_needs_gc()) {
943 if (g1_policy()->can_expand_young_list()) { 938 if (g1_policy()->can_expand_young_list()) {
944 // No need for an ergo verbose message here, 939 // No need for an ergo verbose message here,
945 // can_expand_young_list() does this when it returns true. 940 // can_expand_young_list() does this when it returns true.
946 result = _mutator_alloc_region.attempt_allocation_force(word_size, 941 result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
947 false /* bot_updates */); 942 false /* bot_updates */);
948 if (result != NULL) { 943 if (result != NULL) {
949 return result; 944 return result;
950 } 945 }
951 } 946 }
952 should_try_gc = false; 947 should_try_gc = false;
1002 // allocation attempt in case another thread successfully 997 // allocation attempt in case another thread successfully
1003 // performed a collection and reclaimed enough space. We do the 998 // performed a collection and reclaimed enough space. We do the
1004 // first attempt (without holding the Heap_lock) here and the 999 // first attempt (without holding the Heap_lock) here and the
1005 // follow-on attempt will be at the start of the next loop 1000 // follow-on attempt will be at the start of the next loop
1006 // iteration (after taking the Heap_lock). 1001 // iteration (after taking the Heap_lock).
1007 result = _mutator_alloc_region.attempt_allocation(word_size, 1002 result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
1008 false /* bot_updates */); 1003 false /* bot_updates */);
1009 if (result != NULL) { 1004 if (result != NULL) {
1010 return result; 1005 return result;
1011 } 1006 }
1012 1007
1013 // Give a warning if we seem to be looping forever. 1008 // Give a warning if we seem to be looping forever.
1021 ShouldNotReachHere(); 1016 ShouldNotReachHere();
1022 return NULL; 1017 return NULL;
1023 } 1018 }
1024 1019
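attempt_allocation_slow() above samples the collection count while holding the Heap_lock and hands that count to the GC request, so that when many threads fail at the same time only one of them actually triggers a collection and the others simply retry. A simplified standalone sketch of that handshake (plain C++; the mutex, counter and helper names are stand-ins, not HotSpot APIs):

#include <cstdio>
#include <mutex>

static std::mutex heap_lock;
static unsigned   total_collections = 0;
static int        free_words = 100;

// Pretend allocation: succeeds while there is room left.
static bool try_allocate(int word_size) {
  std::lock_guard<std::mutex> g(heap_lock);
  if (free_words >= word_size) { free_words -= word_size; return true; }
  return false;
}

// Pretend "collect for allocation": runs a GC only if no other thread has
// completed one since gc_count_before was sampled.
static void collect_for_allocation(unsigned gc_count_before) {
  std::lock_guard<std::mutex> g(heap_lock);
  if (total_collections == gc_count_before) {
    total_collections++;   // "do" the GC
    free_words = 100;      // pretend it reclaimed space
  }
}

static bool allocate_slow(int word_size) {
  for (int attempt = 0; attempt < 10; ++attempt) {
    unsigned gc_count_before;
    {
      std::lock_guard<std::mutex> g(heap_lock);
      gc_count_before = total_collections;     // sampled under the lock
    }
    if (try_allocate(word_size)) return true;
    collect_for_allocation(gc_count_before);   // at most one GC per sampled count
  }
  return false;
}

int main() {
  free_words = 10;                             // force the slow path once
  std::printf("allocated: %s\n", allocate_slow(50) ? "yes" : "no");
  return 0;
}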
1025 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, 1020 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1026 unsigned int * gc_count_before_ret, 1021 unsigned int * gc_count_before_ret,
1027 int* gclocker_retry_count_ret) { 1022 int* gclocker_retry_count_ret) {
1028 // The structure of this method has a lot of similarities to 1023 // The structure of this method has a lot of similarities to
1029 // attempt_allocation_slow(). The reason these two were not merged 1024 // attempt_allocation_slow(). The reason these two were not merged
1030 // into a single one is that such a method would require several "if 1025 // into a single one is that such a method would require several "if
1031 // allocation is not humongous do this, otherwise do that" 1026 // allocation is not humongous do this, otherwise do that"
1032 // conditional paths which would obscure its flow. In fact, an early 1027 // conditional paths which would obscure its flow. In fact, an early
1063 MutexLockerEx x(Heap_lock); 1058 MutexLockerEx x(Heap_lock);
1064 1059
1065 // Given that humongous objects are not allocated in young 1060 // Given that humongous objects are not allocated in young
1066 // regions, we'll first try to do the allocation without doing a 1061 // regions, we'll first try to do the allocation without doing a
1067 // collection hoping that there's enough space in the heap. 1062 // collection hoping that there's enough space in the heap.
1068 result = humongous_obj_allocate(word_size); 1063 result = humongous_obj_allocate(word_size, AllocationContext::current());
1069 if (result != NULL) { 1064 if (result != NULL) {
1070 return result; 1065 return result;
1071 } 1066 }
1072 1067
1073 if (GC_locker::is_active_and_needs_gc()) { 1068 if (GC_locker::is_active_and_needs_gc()) {
1139 ShouldNotReachHere(); 1134 ShouldNotReachHere();
1140 return NULL; 1135 return NULL;
1141 } 1136 }
1142 1137
1143 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, 1138 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1144 bool expect_null_mutator_alloc_region) { 1139 AllocationContext_t context,
1140 bool expect_null_mutator_alloc_region) {
1145 assert_at_safepoint(true /* should_be_vm_thread */); 1141 assert_at_safepoint(true /* should_be_vm_thread */);
1146 assert(_mutator_alloc_region.get() == NULL || 1142 assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
1147 !expect_null_mutator_alloc_region, 1143 !expect_null_mutator_alloc_region,
1148 "the current alloc region was unexpectedly found to be non-NULL"); 1144 "the current alloc region was unexpectedly found to be non-NULL");
1149 1145
1150 if (!isHumongous(word_size)) { 1146 if (!isHumongous(word_size)) {
1151 return _mutator_alloc_region.attempt_allocation_locked(word_size, 1147 return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
1152 false /* bot_updates */); 1148 false /* bot_updates */);
1153 } else { 1149 } else {
1154 HeapWord* result = humongous_obj_allocate(word_size); 1150 HeapWord* result = humongous_obj_allocate(word_size, context);
1155 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) { 1151 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1156 g1_policy()->set_initiate_conc_mark_if_possible(); 1152 g1_policy()->set_initiate_conc_mark_if_possible();
1157 } 1153 }
1158 return result; 1154 return result;
1159 } 1155 }
1236 private: 1232 private:
1237 G1HRPrinter* _hr_printer; 1233 G1HRPrinter* _hr_printer;
1238 public: 1234 public:
1239 bool doHeapRegion(HeapRegion* hr) { 1235 bool doHeapRegion(HeapRegion* hr) {
1240 assert(!hr->is_young(), "not expecting to find young regions"); 1236 assert(!hr->is_young(), "not expecting to find young regions");
1241 // We only generate output for non-empty regions. 1237 if (hr->is_free()) {
1242 if (!hr->is_empty()) { 1238 // We only generate output for non-empty regions.
1243 if (!hr->isHumongous()) { 1239 } else if (hr->startsHumongous()) {
1244 _hr_printer->post_compaction(hr, G1HRPrinter::Old); 1240 if (hr->region_num() == 1) {
1245 } else if (hr->startsHumongous()) { 1241 // single humongous region
1246 if (hr->region_num() == 1) { 1242 _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1247 // single humongous region
1248 _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1249 } else {
1250 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1251 }
1252 } else { 1243 } else {
1253 assert(hr->continuesHumongous(), "only way to get here"); 1244 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1254 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1255 } 1245 }
1246 } else if (hr->continuesHumongous()) {
1247 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1248 } else if (hr->is_old()) {
1249 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1250 } else {
1251 ShouldNotReachHere();
1256 } 1252 }
1257 return false; 1253 return false;
1258 } 1254 }
1259 1255
1260 PostCompactionPrinterClosure(G1HRPrinter* hr_printer) 1256 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1261 : _hr_printer(hr_printer) { } 1257 : _hr_printer(hr_printer) { }
1262 }; 1258 };
1263 1259
1264 void G1CollectedHeap::print_hrs_post_compaction() { 1260 void G1CollectedHeap::print_hrm_post_compaction() {
1265 PostCompactionPrinterClosure cl(hr_printer()); 1261 PostCompactionPrinterClosure cl(hr_printer());
1266 heap_region_iterate(&cl); 1262 heap_region_iterate(&cl);
1267 } 1263 }
1268 1264
1269 bool G1CollectedHeap::do_collection(bool explicit_gc, 1265 bool G1CollectedHeap::do_collection(bool explicit_gc,
1303 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant"); 1299 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1304 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); 1300 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
1305 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); 1301 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1306 1302
1307 { 1303 {
1308 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL); 1304 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
1309 TraceCollectorStats tcs(g1mm()->full_collection_counters()); 1305 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1310 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); 1306 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1311 1307
1312 double start = os::elapsedTime(); 1308 double start = os::elapsedTime();
1313 g1_policy()->record_full_collection_start(); 1309 g1_policy()->record_full_collection_start();
1332 1328
1333 assert(used() == recalculate_used(), "Should be equal"); 1329 assert(used() == recalculate_used(), "Should be equal");
1334 1330
1335 verify_before_gc(); 1331 verify_before_gc();
1336 1332
1333 check_bitmaps("Full GC Start");
1337 pre_full_gc_dump(gc_timer); 1334 pre_full_gc_dump(gc_timer);
1338 1335
1339 COMPILER2_PRESENT(DerivedPointerTable::clear()); 1336 COMPILER2_PRESENT(DerivedPointerTable::clear());
1340 1337
1341 // Disable discovery and empty the discovered lists 1338 // Disable discovery and empty the discovered lists
1348 // refinement, if any are in progress. We have to do this before 1345 // refinement, if any are in progress. We have to do this before
1349 // wait_until_scan_finished() below. 1346 // wait_until_scan_finished() below.
1350 concurrent_mark()->abort(); 1347 concurrent_mark()->abort();
1351 1348
1352 // Make sure we'll choose a new allocation region afterwards. 1349 // Make sure we'll choose a new allocation region afterwards.
1353 release_mutator_alloc_region(); 1350 _allocator->release_mutator_alloc_region();
1354 abandon_gc_alloc_regions(); 1351 _allocator->abandon_gc_alloc_regions();
1355 g1_rem_set()->cleanupHRRS(); 1352 g1_rem_set()->cleanupHRRS();
1356 1353
1357 // We should call this after we retire any currently active alloc 1354 // We should call this after we retire any currently active alloc
1358 // regions so that all the ALLOC / RETIRE events are generated 1355 // regions so that all the ALLOC / RETIRE events are generated
1359 // before the start GC event. 1356 // before the start GC event.
1387 { 1384 {
1388 HandleMark hm; // Discard invalid handles created during gc 1385 HandleMark hm; // Discard invalid handles created during gc
1389 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); 1386 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1390 } 1387 }
1391 1388
1392 assert(free_regions() == 0, "we should not have added any free regions"); 1389 assert(num_free_regions() == 0, "we should not have added any free regions");
1393 rebuild_region_sets(false /* free_list_only */); 1390 rebuild_region_sets(false /* free_list_only */);
1394 1391
1395 // Enqueue any discovered reference objects that have 1392 // Enqueue any discovered reference objects that have
1396 // not been removed from the discovered lists. 1393 // not been removed from the discovered lists.
1397 ref_processor_stw()->enqueue_discovered_references(); 1394 ref_processor_stw()->enqueue_discovered_references();
1427 if (_hr_printer.is_active()) { 1424 if (_hr_printer.is_active()) {
1428 // We should do this after we potentially resize the heap so 1425 // We should do this after we potentially resize the heap so
1429 // that all the COMMIT / UNCOMMIT events are generated before 1426 // that all the COMMIT / UNCOMMIT events are generated before
1430 // the end GC event. 1427 // the end GC event.
1431 1428
1432 print_hrs_post_compaction(); 1429 print_hrm_post_compaction();
1433 _hr_printer.end_gc(true /* full */, (size_t) total_collections()); 1430 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1434 } 1431 }
1435 1432
1436 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); 1433 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1437 if (hot_card_cache->use_cache()) { 1434 if (hot_card_cache->use_cache()) {
1487 ParallelTaskTerminator::print_termination_counts(); 1484 ParallelTaskTerminator::print_termination_counts();
1488 #endif 1485 #endif
1489 1486
1490 // Discard all rset updates 1487 // Discard all rset updates
1491 JavaThread::dirty_card_queue_set().abandon_logs(); 1488 JavaThread::dirty_card_queue_set().abandon_logs();
1492 assert(!G1DeferredRSUpdate 1489 assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1493 || (G1DeferredRSUpdate &&
1494 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1495 1490
1496 _young_list->reset_sampled_info(); 1491 _young_list->reset_sampled_info();
1497 // At this point there should be no regions in the 1492 // At this point there should be no regions in the
1498 // entire heap tagged as young. 1493 // entire heap tagged as young.
1499 assert(check_young_list_empty(true /* check_heap */), 1494 assert(check_young_list_empty(true /* check_heap */),
1500 "young list should be empty at this point"); 1495 "young list should be empty at this point");
1501 1496
1502 // Update the number of full collections that have been completed. 1497 // Update the number of full collections that have been completed.
1503 increment_old_marking_cycles_completed(false /* concurrent */); 1498 increment_old_marking_cycles_completed(false /* concurrent */);
1504 1499
1505 _hrs.verify_optional(); 1500 _hrm.verify_optional();
1506 verify_region_sets_optional(); 1501 verify_region_sets_optional();
1507 1502
1508 verify_after_gc(); 1503 verify_after_gc();
1504
1505 // Clear the previous marking bitmap, if needed for bitmap verification.
1506 // Note we cannot do this when we clear the next marking bitmap in
1507 // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1508 // objects marked during a full GC against the previous bitmap.
1509 // But we need to clear it before calling check_bitmaps below since
1510 // the full GC has compacted objects and updated TAMS but not updated
1511 // the prev bitmap.
1512 if (G1VerifyBitmaps) {
1513 ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1514 }
1515 check_bitmaps("Full GC End");
1509 1516
1510 // Start a new incremental collection set for the next pause 1517 // Start a new incremental collection set for the next pause
1511 assert(g1_policy()->collection_set() == NULL, "must be"); 1518 assert(g1_policy()->collection_set() == NULL, "must be");
1512 g1_policy()->start_incremental_cset_building(); 1519 g1_policy()->start_incremental_cset_building();
1513 1520
1514 // Clear the _cset_fast_test bitmap in anticipation of adding
1515 // regions to the incremental collection set for the next
1516 // evacuation pause.
1517 clear_cset_fast_test(); 1521 clear_cset_fast_test();
1518 1522
1519 init_mutator_alloc_region(); 1523 _allocator->init_mutator_alloc_region();
1520 1524
1521 double end = os::elapsedTime(); 1525 double end = os::elapsedTime();
1522 g1_policy()->record_full_collection_end(); 1526 g1_policy()->record_full_collection_end();
1523 1527
1524 if (G1Log::fine()) { 1528 if (G1Log::fine()) {
1650 } 1654 }
1651 1655
1652 1656
1653 HeapWord* 1657 HeapWord*
1654 G1CollectedHeap::satisfy_failed_allocation(size_t word_size, 1658 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1659 AllocationContext_t context,
1655 bool* succeeded) { 1660 bool* succeeded) {
1656 assert_at_safepoint(true /* should_be_vm_thread */); 1661 assert_at_safepoint(true /* should_be_vm_thread */);
1657 1662
1658 *succeeded = true; 1663 *succeeded = true;
1659 // Let's attempt the allocation first. 1664 // Let's attempt the allocation first.
1660 HeapWord* result = 1665 HeapWord* result =
1661 attempt_allocation_at_safepoint(word_size, 1666 attempt_allocation_at_safepoint(word_size,
1662 false /* expect_null_mutator_alloc_region */); 1667 context,
1668 false /* expect_null_mutator_alloc_region */);
1663 if (result != NULL) { 1669 if (result != NULL) {
1664 assert(*succeeded, "sanity"); 1670 assert(*succeeded, "sanity");
1665 return result; 1671 return result;
1666 } 1672 }
1667 1673
1668 // In a G1 heap, we're supposed to keep allocation from failing by 1674 // In a G1 heap, we're supposed to keep allocation from failing by
1669 // incremental pauses. Therefore, at least for now, we'll favor 1675 // incremental pauses. Therefore, at least for now, we'll favor
1670 // expansion over collection. (This might change in the future if we can 1676 // expansion over collection. (This might change in the future if we can
1671 // do something smarter than full collection to satisfy a failed alloc.) 1677 // do something smarter than full collection to satisfy a failed alloc.)
1672 result = expand_and_allocate(word_size); 1678 result = expand_and_allocate(word_size, context);
1673 if (result != NULL) { 1679 if (result != NULL) {
1674 assert(*succeeded, "sanity"); 1680 assert(*succeeded, "sanity");
1675 return result; 1681 return result;
1676 } 1682 }
1677 1683
1684 return NULL; 1690 return NULL;
1685 } 1691 }
1686 1692
1687 // Retry the allocation 1693 // Retry the allocation
1688 result = attempt_allocation_at_safepoint(word_size, 1694 result = attempt_allocation_at_safepoint(word_size,
1689 true /* expect_null_mutator_alloc_region */); 1695 context,
1696 true /* expect_null_mutator_alloc_region */);
1690 if (result != NULL) { 1697 if (result != NULL) {
1691 assert(*succeeded, "sanity"); 1698 assert(*succeeded, "sanity");
1692 return result; 1699 return result;
1693 } 1700 }
1694 1701
1701 return NULL; 1708 return NULL;
1702 } 1709 }
1703 1710
1704 // Retry the allocation once more 1711 // Retry the allocation once more
1705 result = attempt_allocation_at_safepoint(word_size, 1712 result = attempt_allocation_at_safepoint(word_size,
1706 true /* expect_null_mutator_alloc_region */); 1713 context,
1714 true /* expect_null_mutator_alloc_region */);
1707 if (result != NULL) { 1715 if (result != NULL) {
1708 assert(*succeeded, "sanity"); 1716 assert(*succeeded, "sanity");
1709 return result; 1717 return result;
1710 } 1718 }
1711 1719
1723 // Attempting to expand the heap sufficiently 1731 // Attempting to expand the heap sufficiently
1724 // to support an allocation of the given "word_size". If 1732 // to support an allocation of the given "word_size". If
1725 // successful, perform the allocation and return the address of the 1733 // successful, perform the allocation and return the address of the
1726 // allocated block, or else "NULL". 1734 // allocated block, or else "NULL".
1727 1735
1728 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { 1736 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1729 assert_at_safepoint(true /* should_be_vm_thread */); 1737 assert_at_safepoint(true /* should_be_vm_thread */);
1730 1738
1731 verify_region_sets_optional(); 1739 verify_region_sets_optional();
1732 1740
1733 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes); 1741 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1735 "attempt heap expansion", 1743 "attempt heap expansion",
1736 ergo_format_reason("allocation request failed") 1744 ergo_format_reason("allocation request failed")
1737 ergo_format_byte("allocation request"), 1745 ergo_format_byte("allocation request"),
1738 word_size * HeapWordSize); 1746 word_size * HeapWordSize);
1739 if (expand(expand_bytes)) { 1747 if (expand(expand_bytes)) {
1740 _hrs.verify_optional(); 1748 _hrm.verify_optional();
1741 verify_region_sets_optional(); 1749 verify_region_sets_optional();
1742 return attempt_allocation_at_safepoint(word_size, 1750 return attempt_allocation_at_safepoint(word_size,
1743 false /* expect_null_mutator_alloc_region */); 1751 context,
1752 false /* expect_null_mutator_alloc_region */);
1744 } 1753 }
1745 return NULL; 1754 return NULL;
1746 }
1747
1748 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1749 HeapWord* new_end) {
1750 assert(old_end != new_end, "don't call this otherwise");
1751 assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
1752
1753 // Update the committed mem region.
1754 _g1_committed.set_end(new_end);
1755 // Tell the card table about the update.
1756 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1757 // Tell the BOT about the update.
1758 _bot_shared->resize(_g1_committed.word_size());
1759 // Tell the hot card cache about the update
1760 _cg1r->hot_card_cache()->resize_card_counts(capacity());
1761 } 1755 }
1762 1756
1763 bool G1CollectedHeap::expand(size_t expand_bytes) { 1757 bool G1CollectedHeap::expand(size_t expand_bytes) {
1764 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); 1758 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1765 aligned_expand_bytes = align_size_up(aligned_expand_bytes, 1759 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1768 "expand the heap", 1762 "expand the heap",
1769 ergo_format_byte("requested expansion amount") 1763 ergo_format_byte("requested expansion amount")
1770 ergo_format_byte("attempted expansion amount"), 1764 ergo_format_byte("attempted expansion amount"),
1771 expand_bytes, aligned_expand_bytes); 1765 expand_bytes, aligned_expand_bytes);
1772 1766
1773 if (_g1_storage.uncommitted_size() == 0) { 1767 if (is_maximal_no_gc()) {
1774 ergo_verbose0(ErgoHeapSizing, 1768 ergo_verbose0(ErgoHeapSizing,
1775 "did not expand the heap", 1769 "did not expand the heap",
1776 ergo_format_reason("heap already fully expanded")); 1770 ergo_format_reason("heap already fully expanded"));
1777 return false; 1771 return false;
1778 } 1772 }
1779 1773
1780 // First commit the memory. 1774 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1781 HeapWord* old_end = (HeapWord*) _g1_storage.high(); 1775 assert(regions_to_expand > 0, "Must expand by at least one region");
1782 bool successful = _g1_storage.expand_by(aligned_expand_bytes); 1776
1783 if (successful) { 1777 uint expanded_by = _hrm.expand_by(regions_to_expand);
1784 // Then propagate this update to the necessary data structures. 1778
1785 HeapWord* new_end = (HeapWord*) _g1_storage.high(); 1779 if (expanded_by > 0) {
1786 update_committed_space(old_end, new_end); 1780 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1787
1788 FreeRegionList expansion_list("Local Expansion List");
1789 MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
1790 assert(mr.start() == old_end, "post-condition");
1791 // mr might be a smaller region than what was requested if
1792 // expand_by() was unable to allocate the HeapRegion instances
1793 assert(mr.end() <= new_end, "post-condition");
1794
1795 size_t actual_expand_bytes = mr.byte_size();
1796 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition"); 1781 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1797 assert(actual_expand_bytes == expansion_list.total_capacity_bytes(), 1782 g1_policy()->record_new_heap_size(num_regions());
1798 "post-condition");
1799 if (actual_expand_bytes < aligned_expand_bytes) {
1800 // We could not expand _hrs to the desired size. In this case we
1801 // need to shrink the committed space accordingly.
1802 assert(mr.end() < new_end, "invariant");
1803
1804 size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
1805 // First uncommit the memory.
1806 _g1_storage.shrink_by(diff_bytes);
1807 // Then propagate this update to the necessary data structures.
1808 update_committed_space(new_end, mr.end());
1809 }
1810 _free_list.add_as_tail(&expansion_list);
1811
1812 if (_hr_printer.is_active()) {
1813 HeapWord* curr = mr.start();
1814 while (curr < mr.end()) {
1815 HeapWord* curr_end = curr + HeapRegion::GrainWords;
1816 _hr_printer.commit(curr, curr_end);
1817 curr = curr_end;
1818 }
1819 assert(curr == mr.end(), "post-condition");
1820 }
1821 g1_policy()->record_new_heap_size(n_regions());
1822 } else { 1783 } else {
1823 ergo_verbose0(ErgoHeapSizing, 1784 ergo_verbose0(ErgoHeapSizing,
1824 "did not expand the heap", 1785 "did not expand the heap",
1825 ergo_format_reason("heap expansion operation failed")); 1786 ergo_format_reason("heap expansion operation failed"));
1826 // The expansion of the virtual storage space was unsuccessful. 1787 // The expansion of the virtual storage space was unsuccessful.
1827 // Let's see if it was because we ran out of swap. 1788 // Let's see if it was because we ran out of swap.
1828 if (G1ExitOnExpansionFailure && 1789 if (G1ExitOnExpansionFailure &&
1829 _g1_storage.uncommitted_size() >= aligned_expand_bytes) { 1790 _hrm.available() >= regions_to_expand) {
1830 // We had head room... 1791 // We had head room...
1831 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion"); 1792 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1832 } 1793 }
1833 } 1794 }
1834 return successful; 1795 return regions_to_expand > 0;
1835 } 1796 }
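Editor's note on the new path: after this change, expansion is expressed purely in whole regions rather than raw committed bytes. A minimal standalone sketch of the size-to-region arithmetic used above, with assumed example values for the OS page size and HeapRegion::GrainBytes (they are not taken from this build):

#include <cstddef>
#include <cstdio>

// Round size up to a multiple of alignment (a power of two), mirroring the
// page alignment and region alignment applied before expanding the heap.
static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t page_size   = 4 * 1024;        // assumed OS page size
  const size_t grain_bytes = 1 * 1024 * 1024; // assumed HeapRegion::GrainBytes

  size_t expand_bytes = 3 * 1024 * 1024 + 123; // hypothetical failed allocation request
  size_t aligned = align_up(align_up(expand_bytes, page_size), grain_bytes);
  size_t regions_to_expand = aligned / grain_bytes; // 4 with these values

  // The request is rounded up, so at least one whole region is always committed.
  std::printf("request=%zu aligned=%zu regions=%zu\n",
              expand_bytes, aligned, regions_to_expand);
  return 0;
}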
1836 1797
1837 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { 1798 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1838 size_t aligned_shrink_bytes = 1799 size_t aligned_shrink_bytes =
1839 ReservedSpace::page_align_size_down(shrink_bytes); 1800 ReservedSpace::page_align_size_down(shrink_bytes);
1840 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, 1801 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1841 HeapRegion::GrainBytes); 1802 HeapRegion::GrainBytes);
1842 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); 1803 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1843 1804
1844 uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove); 1805 uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1845 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1846 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes; 1806 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1847 1807
1848 ergo_verbose3(ErgoHeapSizing, 1808 ergo_verbose3(ErgoHeapSizing,
1849 "shrink the heap", 1809 "shrink the heap",
1850 ergo_format_byte("requested shrinking amount") 1810 ergo_format_byte("requested shrinking amount")
1851 ergo_format_byte("aligned shrinking amount") 1811 ergo_format_byte("aligned shrinking amount")
1852 ergo_format_byte("attempted shrinking amount"), 1812 ergo_format_byte("attempted shrinking amount"),
1853 shrink_bytes, aligned_shrink_bytes, shrunk_bytes); 1813 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1854 if (num_regions_removed > 0) { 1814 if (num_regions_removed > 0) {
1855 _g1_storage.shrink_by(shrunk_bytes); 1815 g1_policy()->record_new_heap_size(num_regions());
1856 HeapWord* new_end = (HeapWord*) _g1_storage.high();
1857
1858 if (_hr_printer.is_active()) {
1859 HeapWord* curr = old_end;
1860 while (curr > new_end) {
1861 HeapWord* curr_end = curr;
1862 curr -= HeapRegion::GrainWords;
1863 _hr_printer.uncommit(curr, curr_end);
1864 }
1865 }
1866
1867 _expansion_regions += num_regions_removed;
1868 update_committed_space(old_end, new_end);
1869 HeapRegionRemSet::shrink_heap(n_regions());
1870 g1_policy()->record_new_heap_size(n_regions());
1871 } else { 1816 } else {
1872 ergo_verbose0(ErgoHeapSizing, 1817 ergo_verbose0(ErgoHeapSizing,
1873 "did not shrink the heap", 1818 "did not shrink the heap",
1874 ergo_format_reason("heap shrinking operation failed")); 1819 ergo_format_reason("heap shrinking operation failed"));
1875 } 1820 }
1879 verify_region_sets_optional(); 1824 verify_region_sets_optional();
1880 1825
1881 // We should only reach here at the end of a Full GC which means we 1826 // We should only reach here at the end of a Full GC which means we
1882 // should not be holding on to any GC alloc regions. The method 1827 // should not be holding on to any GC alloc regions. The method
1883 // below will make sure of that and do any remaining clean up. 1828 // below will make sure of that and do any remaining clean up.
1884 abandon_gc_alloc_regions(); 1829 _allocator->abandon_gc_alloc_regions();
1885 1830
1886 // Instead of tearing down / rebuilding the free lists here, we 1831 // Instead of tearing down / rebuilding the free lists here, we
1887 // could instead use the remove_all_pending() method on free_list to 1832 // could instead use the remove_all_pending() method on free_list to
1888 // remove only the ones that we need to remove. 1833 // remove only the ones that we need to remove.
1889 tear_down_region_sets(true /* free_list_only */); 1834 tear_down_region_sets(true /* free_list_only */);
1890 shrink_helper(shrink_bytes); 1835 shrink_helper(shrink_bytes);
1891 rebuild_region_sets(true /* free_list_only */); 1836 rebuild_region_sets(true /* free_list_only */);
1892 1837
1893 _hrs.verify_optional(); 1838 _hrm.verify_optional();
1894 verify_region_sets_optional(); 1839 verify_region_sets_optional();
1895 } 1840 }
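For symmetry with the expansion sketch above, shrinking rounds the other way: the request is aligned down so that only whole regions are ever uncommitted. A small illustration, again with assumed example sizes rather than values from this file:

#include <cstddef>
#include <cstdio>

// Round size down to a multiple of alignment (a power of two); a shrink
// request never gives back a partial region.
static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const size_t grain_bytes = 1 * 1024 * 1024;       // assumed HeapRegion::GrainBytes
  size_t shrink_bytes      = 2 * 1024 * 1024 + 4096; // hypothetical shrink request

  size_t aligned = align_down(shrink_bytes, grain_bytes);
  size_t regions_to_remove = aligned / grain_bytes; // 2 with these values

  std::printf("request=%zu aligned=%zu regions=%zu\n",
              shrink_bytes, aligned, regions_to_remove);
  return 0;
}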
1896 1841
1897 // Public methods. 1842 // Public methods.
1898 1843
1912 _ref_processor_stw(NULL), 1857 _ref_processor_stw(NULL),
1913 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), 1858 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1914 _bot_shared(NULL), 1859 _bot_shared(NULL),
1915 _evac_failure_scan_stack(NULL), 1860 _evac_failure_scan_stack(NULL),
1916 _mark_in_progress(false), 1861 _mark_in_progress(false),
1917 _cg1r(NULL), _summary_bytes_used(0), 1862 _cg1r(NULL),
1918 _g1mm(NULL), 1863 _g1mm(NULL),
1919 _refine_cte_cl(NULL), 1864 _refine_cte_cl(NULL),
1920 _full_collection(false), 1865 _full_collection(false),
1921 _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
1922 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()), 1866 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1923 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()), 1867 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1924 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()), 1868 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1869 _humongous_is_live(),
1870 _has_humongous_reclaim_candidates(false),
1925 _free_regions_coming(false), 1871 _free_regions_coming(false),
1926 _young_list(new YoungList(this)), 1872 _young_list(new YoungList(this)),
1927 _gc_time_stamp(0), 1873 _gc_time_stamp(0),
1928 _retained_old_gc_alloc_region(NULL),
1929 _survivor_plab_stats(YoungPLABSize, PLABWeight), 1874 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1930 _old_plab_stats(OldPLABSize, PLABWeight), 1875 _old_plab_stats(OldPLABSize, PLABWeight),
1931 _expand_heap_after_alloc_failure(true), 1876 _expand_heap_after_alloc_failure(true),
1932 _surviving_young_words(NULL), 1877 _surviving_young_words(NULL),
1933 _old_marking_cycles_started(0), 1878 _old_marking_cycles_started(0),
1934 _old_marking_cycles_completed(0), 1879 _old_marking_cycles_completed(0),
1935 _concurrent_cycle_started(false), 1880 _concurrent_cycle_started(false),
1936 _in_cset_fast_test(NULL), 1881 _in_cset_fast_test(),
1937 _in_cset_fast_test_base(NULL),
1938 _dirty_cards_region_list(NULL), 1882 _dirty_cards_region_list(NULL),
1939 _worker_cset_start_region(NULL), 1883 _worker_cset_start_region(NULL),
1940 _worker_cset_start_region_time_stamp(NULL), 1884 _worker_cset_start_region_time_stamp(NULL),
1941 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()), 1885 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1942 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), 1886 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1946 _g1h = this; 1890 _g1h = this;
1947 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { 1891 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1948 vm_exit_during_initialization("Failed necessary allocation."); 1892 vm_exit_during_initialization("Failed necessary allocation.");
1949 } 1893 }
1950 1894
1895 _allocator = G1Allocator::create_allocator(_g1h);
1951 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; 1896 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1952 1897
1953 int n_queues = MAX2((int)ParallelGCThreads, 1); 1898 int n_queues = MAX2((int)ParallelGCThreads, 1);
1954 _task_queues = new RefToScanQueueSet(n_queues); 1899 _task_queues = new RefToScanQueueSet(n_queues);
1955 1900
2002 // Ensure that the sizes are properly aligned. 1947 // Ensure that the sizes are properly aligned.
2003 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); 1948 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
2004 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); 1949 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2005 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap"); 1950 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
2006 1951
2007 _cg1r = new ConcurrentG1Refine(this); 1952 _refine_cte_cl = new RefineCardTableEntryClosure();
1953
1954 _cg1r = new ConcurrentG1Refine(this, _refine_cte_cl);
2008 1955
2009 // Reserve the maximum. 1956 // Reserve the maximum.
2010 1957
2011 // When compressed oops are enabled, the preferred heap base 1958 // When compressed oops are enabled, the preferred heap base
2012 // is calculated by subtracting the requested size from the 1959 // is calculated by subtracting the requested size from the
2027 // happen in asserts: DLD.) 1974 // happen in asserts: DLD.)
2028 _reserved.set_word_size(0); 1975 _reserved.set_word_size(0);
2029 _reserved.set_start((HeapWord*)heap_rs.base()); 1976 _reserved.set_start((HeapWord*)heap_rs.base());
2030 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); 1977 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
2031 1978
2032 _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
2033
2034 // Create the gen rem set (and barrier set) for the entire reserved region. 1979 // Create the gen rem set (and barrier set) for the entire reserved region.
2035 _rem_set = collector_policy()->create_rem_set(_reserved, 2); 1980 _rem_set = collector_policy()->create_rem_set(_reserved, 2);
2036 set_barrier_set(rem_set()->bs()); 1981 set_barrier_set(rem_set()->bs());
2037 if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) { 1982 if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
2038 vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS"); 1983 vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
2042 // Also create a G1 rem set. 1987 // Also create a G1 rem set.
2043 _g1_rem_set = new G1RemSet(this, g1_barrier_set()); 1988 _g1_rem_set = new G1RemSet(this, g1_barrier_set());
2044 1989
2045 // Carve out the G1 part of the heap. 1990 // Carve out the G1 part of the heap.
2046 1991
2047 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); 1992 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
2048 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), 1993 G1RegionToSpaceMapper* heap_storage =
2049 g1_rs.size()/HeapWordSize); 1994 G1RegionToSpaceMapper::create_mapper(g1_rs,
2050 1995 UseLargePages ? os::large_page_size() : os::vm_page_size(),
2051 _g1_storage.initialize(g1_rs, 0); 1996 HeapRegion::GrainBytes,
2052 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); 1997 1,
2053 _hrs.initialize((HeapWord*) _g1_reserved.start(), 1998 mtJavaHeap);
2054 (HeapWord*) _g1_reserved.end()); 1999 heap_storage->set_mapping_changed_listener(&_listener);
2055 assert(_hrs.max_length() == _expansion_regions, 2000
2056 err_msg("max length: %u expansion regions: %u", 2001 // Reserve space for the block offset table. We do not support automatic uncommit
2057 _hrs.max_length(), _expansion_regions)); 2002 // for the card table at this time. BOT only.
2058 2003 ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
2059 // Do later initialization work for concurrent refinement. 2004 G1RegionToSpaceMapper* bot_storage =
2060 _cg1r->init(); 2005 G1RegionToSpaceMapper::create_mapper(bot_rs,
2006 os::vm_page_size(),
2007 HeapRegion::GrainBytes,
2008 G1BlockOffsetSharedArray::N_bytes,
2009 mtGC);
2010
2011 ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
2012 G1RegionToSpaceMapper* cardtable_storage =
2013 G1RegionToSpaceMapper::create_mapper(cardtable_rs,
2014 os::vm_page_size(),
2015 HeapRegion::GrainBytes,
2016 G1BlockOffsetSharedArray::N_bytes,
2017 mtGC);
2018
2019 // Reserve space for the card counts table.
2020 ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
2021 G1RegionToSpaceMapper* card_counts_storage =
2022 G1RegionToSpaceMapper::create_mapper(card_counts_rs,
2023 os::vm_page_size(),
2024 HeapRegion::GrainBytes,
2025 G1BlockOffsetSharedArray::N_bytes,
2026 mtGC);
2027
2028 // Reserve space for prev and next bitmap.
2029 size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
2030
2031 ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
2032 G1RegionToSpaceMapper* prev_bitmap_storage =
2033 G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
2034 os::vm_page_size(),
2035 HeapRegion::GrainBytes,
2036 CMBitMap::mark_distance(),
2037 mtGC);
2038
2039 ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
2040 G1RegionToSpaceMapper* next_bitmap_storage =
2041 G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
2042 os::vm_page_size(),
2043 HeapRegion::GrainBytes,
2044 CMBitMap::mark_distance(),
2045 mtGC);
2046
2047 _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
2048 g1_barrier_set()->initialize(cardtable_storage);
2049 // Do later initialization work for concurrent refinement.
2050 _cg1r->init(card_counts_storage);
2061 2051
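The initialization block above replaces the old single _g1_storage space with one G1RegionToSpaceMapper per auxiliary structure (heap, BOT, card table, card counts, and the two mark bitmaps), so each structure can be committed in lockstep with the heap regions it describes. A standalone sketch of the sizing idea; the per-heap-byte ratios and sizes below are illustrative assumptions, not values read from this file:

#include <cstddef>
#include <cstdio>

// Illustrative model: each auxiliary structure spans the whole reserved heap
// at a fixed heap-bytes-per-metadata-byte ratio, and is committed in chunks
// corresponding to whole heap regions.
struct AuxSpace {
  const char* name;
  size_t      heap_bytes_per_metadata_byte; // assumed ratio, for illustration only
};

int main() {
  const size_t reserved_heap = 1024 * 1024 * 1024; // assumed 1 GB reservation
  const size_t grain_bytes   = 1 * 1024 * 1024;    // assumed region size

  const AuxSpace spaces[] = {
    { "card table",    512 }, // one byte per 512-byte card (assumed)
    { "block offsets", 512 }, // one BOT entry per card-sized chunk (assumed)
    { "mark bitmap",    64 }, // one bit per 8-byte word, i.e. 1/64 of the heap (assumed)
  };

  for (const AuxSpace& s : spaces) {
    size_t total_bytes = reserved_heap / s.heap_bytes_per_metadata_byte;
    size_t per_region  = grain_bytes / s.heap_bytes_per_metadata_byte;
    std::printf("%-13s total=%zu bytes, committed %zu bytes per expanded region\n",
                s.name, total_bytes, per_region);
  }
  return 0;
}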
2062 // 6843694 - ensure that the maximum region index can fit 2052 // 6843694 - ensure that the maximum region index can fit
2063 // in the remembered set structures. 2053 // in the remembered set structures.
2064 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; 2054 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2065 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); 2055 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2069 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region, 2059 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2070 "too many cards per region"); 2060 "too many cards per region");
2071 2061
2072 FreeRegionList::set_unrealistically_long_length(max_regions() + 1); 2062 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2073 2063
2074 _bot_shared = new G1BlockOffsetSharedArray(_reserved, 2064 _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
2075 heap_word_size(init_byte_size));
2076 2065
2077 _g1h = this; 2066 _g1h = this;
2078 2067
2079 _in_cset_fast_test_length = max_regions(); 2068 _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
2080 _in_cset_fast_test_base = 2069 _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
2081 NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
2082
2083 // We're biasing _in_cset_fast_test to avoid subtracting the
2084 // beginning of the heap every time we want to index; basically
2085 // it's the same with what we do with the card table.
2086 _in_cset_fast_test = _in_cset_fast_test_base -
2087 ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
2088
2089 // Clear the _cset_fast_test bitmap in anticipation of adding
2090 // regions to the incremental collection set for the first
2091 // evacuation pause.
2092 clear_cset_fast_test();
2093 2070
2094 // Create the ConcurrentMark data structure and thread. 2071 // Create the ConcurrentMark data structure and thread.
2095 // (Must do this late, so that "max_regions" is defined.) 2072 // (Must do this late, so that "max_regions" is defined.)
2096 _cm = new ConcurrentMark(this, heap_rs); 2073 _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
2097 if (_cm == NULL || !_cm->completed_initialization()) { 2074 if (_cm == NULL || !_cm->completed_initialization()) {
2098 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark"); 2075 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2099 return JNI_ENOMEM; 2076 return JNI_ENOMEM;
2100 } 2077 }
2101 _cmThread = _cm->cmThread(); 2078 _cmThread = _cm->cmThread();
2110 } 2087 }
2111 2088
2112 // Perform any initialization actions delegated to the policy. 2089 // Perform any initialization actions delegated to the policy.
2113 g1_policy()->init(); 2090 g1_policy()->init();
2114 2091
2115 _refine_cte_cl =
2116 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
2117 g1_rem_set(),
2118 concurrent_g1_refine());
2119 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
2120
2121 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, 2092 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2122 SATB_Q_FL_lock, 2093 SATB_Q_FL_lock,
2123 G1SATBProcessCompletedThreshold, 2094 G1SATBProcessCompletedThreshold,
2124 Shared_SATB_Q_lock); 2095 Shared_SATB_Q_lock);
2125 2096
2126 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, 2097 JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
2098 DirtyCardQ_CBL_mon,
2127 DirtyCardQ_FL_lock, 2099 DirtyCardQ_FL_lock,
2128 concurrent_g1_refine()->yellow_zone(), 2100 concurrent_g1_refine()->yellow_zone(),
2129 concurrent_g1_refine()->red_zone(), 2101 concurrent_g1_refine()->red_zone(),
2130 Shared_DirtyCardQ_lock); 2102 Shared_DirtyCardQ_lock);
2131 2103
2132 if (G1DeferredRSUpdate) { 2104 dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
2133 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, 2105 DirtyCardQ_CBL_mon,
2134 DirtyCardQ_FL_lock, 2106 DirtyCardQ_FL_lock,
2135 -1, // never trigger processing 2107 -1, // never trigger processing
2136 -1, // no limit on length 2108 -1, // no limit on length
2137 Shared_DirtyCardQ_lock, 2109 Shared_DirtyCardQ_lock,
2138 &JavaThread::dirty_card_queue_set()); 2110 &JavaThread::dirty_card_queue_set());
2139 }
2140 2111
2141 // Initialize the card queue set used to hold cards containing 2112 // Initialize the card queue set used to hold cards containing
2142 // references into the collection set. 2113 // references into the collection set.
2143 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, 2114 _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
2115 DirtyCardQ_CBL_mon,
2144 DirtyCardQ_FL_lock, 2116 DirtyCardQ_FL_lock,
2145 -1, // never trigger processing 2117 -1, // never trigger processing
2146 -1, // no limit on length 2118 -1, // no limit on length
2147 Shared_DirtyCardQ_lock, 2119 Shared_DirtyCardQ_lock,
2148 &JavaThread::dirty_card_queue_set()); 2120 &JavaThread::dirty_card_queue_set());
2149 2121
2150 // In case we're keeping closure specialization stats, initialize those 2122 // In case we're keeping closure specialization stats, initialize those
2151 // counts and that mechanism. 2123 // counts and that mechanism.
2152 SpecializationStats::clear(); 2124 SpecializationStats::clear();
2153 2125
2154 // Here we allocate the dummy full region that is required by the 2126 // Here we allocate the dummy HeapRegion that is required by the
2155 // G1AllocRegion class. If we don't pass an address in the reserved 2127 // G1AllocRegion class.
2156 // space here, lots of asserts fire. 2128 HeapRegion* dummy_region = _hrm.get_dummy_region();
2157 2129
2158 HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2159 _g1_reserved.start());
2160 // We'll re-use the same region whether the alloc region will 2130 // We'll re-use the same region whether the alloc region will
2161 // require BOT updates or not and, if it doesn't, then a non-young 2131 // require BOT updates or not and, if it doesn't, then a non-young
2162 // region will complain that it cannot support allocations without 2132 // region will complain that it cannot support allocations without
2163 // BOT updates. So we'll tag the dummy region as young to avoid that. 2133 // BOT updates. So we'll tag the dummy region as eden to avoid that.
2164 dummy_region->set_young(); 2134 dummy_region->set_eden();
2165 // Make sure it's full. 2135 // Make sure it's full.
2166 dummy_region->set_top(dummy_region->end()); 2136 dummy_region->set_top(dummy_region->end());
2167 G1AllocRegion::setup(this, dummy_region); 2137 G1AllocRegion::setup(this, dummy_region);
2168 2138
2169 init_mutator_alloc_region(); 2139 _allocator->init_mutator_alloc_region();
2170 2140
2171 // Do create of the monitoring and management support so that 2141 // Do create of the monitoring and management support so that
2172 // values in the heap have been properly initialized. 2142 // values in the heap have been properly initialized.
2173 _g1mm = new G1MonitoringSupport(this); 2143 _g1mm = new G1MonitoringSupport(this);
2174 2144
2184 _cg1r->stop(); 2154 _cg1r->stop();
2185 _cmThread->stop(); 2155 _cmThread->stop();
2186 if (G1StringDedup::is_enabled()) { 2156 if (G1StringDedup::is_enabled()) {
2187 G1StringDedup::stop(); 2157 G1StringDedup::stop();
2188 } 2158 }
2159 }
2160
2161 void G1CollectedHeap::clear_humongous_is_live_table() {
2162 guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Should only be called if true");
2163 _humongous_is_live.clear();
2189 } 2164 }
2190 2165
2191 size_t G1CollectedHeap::conservative_max_heap_alignment() { 2166 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2192 return HeapRegion::max_region_size(); 2167 return HeapRegion::max_region_size();
2193 } 2168 }
2265 // is alive closure 2240 // is alive closure
2266 // (for efficiency/performance) 2241 // (for efficiency/performance)
2267 } 2242 }
2268 2243
2269 size_t G1CollectedHeap::capacity() const { 2244 size_t G1CollectedHeap::capacity() const {
2270 return _g1_committed.byte_size(); 2245 return _hrm.length() * HeapRegion::GrainBytes;
2271 } 2246 }
2272 2247
2273 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) { 2248 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2274 assert(!hr->continuesHumongous(), "pre-condition"); 2249 assert(!hr->continuesHumongous(), "pre-condition");
2275 hr->reset_gc_time_stamp(); 2250 hr->reset_gc_time_stamp();
2276 if (hr->startsHumongous()) { 2251 if (hr->startsHumongous()) {
2277 uint first_index = hr->hrs_index() + 1; 2252 uint first_index = hr->hrm_index() + 1;
2278 uint last_index = hr->last_hc_index(); 2253 uint last_index = hr->last_hc_index();
2279 for (uint i = first_index; i < last_index; i += 1) { 2254 for (uint i = first_index; i < last_index; i += 1) {
2280 HeapRegion* chr = region_at(i); 2255 HeapRegion* chr = region_at(i);
2281 assert(chr->continuesHumongous(), "sanity"); 2256 assert(chr->continuesHumongous(), "sanity");
2282 chr->reset_gc_time_stamp(); 2257 chr->reset_gc_time_stamp();
2333 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); 2308 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2334 } 2309 }
2335 2310
2336 2311
2337 // Computes the sum of the storage used by the various regions. 2312 // Computes the sum of the storage used by the various regions.
2338
2339 size_t G1CollectedHeap::used() const { 2313 size_t G1CollectedHeap::used() const {
2340 assert(Heap_lock->owner() != NULL, 2314 return _allocator->used();
2341 "Should be owned on this thread's behalf.");
2342 size_t result = _summary_bytes_used;
2343 // Read only once in case it is set to NULL concurrently
2344 HeapRegion* hr = _mutator_alloc_region.get();
2345 if (hr != NULL)
2346 result += hr->used();
2347 return result;
2348 } 2315 }
2349 2316
2350 size_t G1CollectedHeap::used_unlocked() const { 2317 size_t G1CollectedHeap::used_unlocked() const {
2351 size_t result = _summary_bytes_used; 2318 return _allocator->used_unlocked();
2352 return result;
2353 } 2319 }
2354 2320
2355 class SumUsedClosure: public HeapRegionClosure { 2321 class SumUsedClosure: public HeapRegionClosure {
2356 size_t _used; 2322 size_t _used;
2357 public: 2323 public:
2373 2339
2374 g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0); 2340 g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2375 return blk.result(); 2341 return blk.result();
2376 } 2342 }
2377 2343
2378 size_t G1CollectedHeap::unsafe_max_alloc() {
2379 if (free_regions() > 0) return HeapRegion::GrainBytes;
2380 // otherwise, is there space in the current allocation region?
2381
2382 // We need to store the current allocation region in a local variable
2383 // here. The problem is that this method doesn't take any locks and
2384 // there may be other threads which overwrite the current allocation
2385 // region field. attempt_allocation(), for example, sets it to NULL
2386 // and this can happen *after* the NULL check here but before the call
2387 // to free(), resulting in a SIGSEGV. Note that this doesn't appear
2388 // to be a problem in the optimized build, since the two loads of the
2389 // current allocation region field are optimized away.
2390 HeapRegion* hr = _mutator_alloc_region.get();
2391 if (hr == NULL) {
2392 return 0;
2393 }
2394 return hr->free();
2395 }
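The comment in the deleted unsafe_max_alloc() above describes a classic lock-free reading pattern: a field that another thread may concurrently set to NULL must be loaded into a local exactly once before it is tested and dereferenced. A minimal sketch of that pattern; the names are illustrative stand-ins, not HotSpot APIs:

#include <atomic>
#include <cstddef>

struct Region {
  size_t free_bytes() const { return 4096; } // placeholder value
};

// Shared slot that a mutator thread may clear at any time.
static std::atomic<Region*> current_region{nullptr};

// Correct: load the pointer once, then test and use the same local value.
size_t free_in_current_region() {
  Region* r = current_region.load(std::memory_order_acquire);
  if (r == nullptr) {
    return 0;
  }
  return r->free_bytes();
}

// Broken variant (what the comment warns about): two separate loads allow the
// slot to become NULL between the check and the use.
// size_t racy() {
//   if (current_region.load() != nullptr) {        // load #1
//     return current_region.load()->free_bytes();  // load #2 may now be NULL
//   }
//   return 0;
// }

int main() {
  static Region r;
  current_region.store(&r);
  return free_in_current_region() == 4096 ? 0 : 1;
}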
2396
2397 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { 2344 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2398 switch (cause) { 2345 switch (cause) {
2399 case GCCause::_gc_locker: return GCLockerInvokesConcurrent; 2346 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
2400 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent; 2347 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
2401 case GCCause::_g1_humongous_allocation: return true; 2348 case GCCause::_g1_humongous_allocation: return true;
2349 case GCCause::_update_allocation_context_stats_inc: return true;
2402 default: return false; 2350 default: return false;
2403 } 2351 }
2404 } 2352 }
2405 2353
2406 #ifndef PRODUCT 2354 #ifndef PRODUCT
2410 // And as a result the region we'll allocate will be humongous. 2358 // And as a result the region we'll allocate will be humongous.
2411 guarantee(isHumongous(word_size), "sanity"); 2359 guarantee(isHumongous(word_size), "sanity");
2412 2360
2413 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) { 2361 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2414 // Let's use the existing mechanism for the allocation 2362 // Let's use the existing mechanism for the allocation
2415 HeapWord* dummy_obj = humongous_obj_allocate(word_size); 2363 HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2364 AllocationContext::system());
2416 if (dummy_obj != NULL) { 2365 if (dummy_obj != NULL) {
2417 MemRegion mr(dummy_obj, word_size); 2366 MemRegion mr(dummy_obj, word_size);
2418 CollectedHeap::fill_with_object(mr); 2367 CollectedHeap::fill_with_object(mr);
2419 } else { 2368 } else {
2420 // If we can't allocate once, we probably cannot allocate 2369 // If we can't allocate once, we probably cannot allocate
2538 void G1CollectedHeap::collect(GCCause::Cause cause) { 2487 void G1CollectedHeap::collect(GCCause::Cause cause) {
2539 assert_heap_not_locked(); 2488 assert_heap_not_locked();
2540 2489
2541 unsigned int gc_count_before; 2490 unsigned int gc_count_before;
2542 unsigned int old_marking_count_before; 2491 unsigned int old_marking_count_before;
2492 unsigned int full_gc_count_before;
2543 bool retry_gc; 2493 bool retry_gc;
2544 2494
2545 do { 2495 do {
2546 retry_gc = false; 2496 retry_gc = false;
2547 2497
2548 { 2498 {
2549 MutexLocker ml(Heap_lock); 2499 MutexLocker ml(Heap_lock);
2550 2500
2551 // Read the GC count while holding the Heap_lock 2501 // Read the GC count while holding the Heap_lock
2552 gc_count_before = total_collections(); 2502 gc_count_before = total_collections();
2503 full_gc_count_before = total_full_collections();
2553 old_marking_count_before = _old_marking_cycles_started; 2504 old_marking_count_before = _old_marking_cycles_started;
2554 } 2505 }
2555 2506
2556 if (should_do_concurrent_full_gc(cause)) { 2507 if (should_do_concurrent_full_gc(cause)) {
2557 // Schedule an initial-mark evacuation pause that will start a 2508 // Schedule an initial-mark evacuation pause that will start a
2560 VM_G1IncCollectionPause op(gc_count_before, 2511 VM_G1IncCollectionPause op(gc_count_before,
2561 0, /* word_size */ 2512 0, /* word_size */
2562 true, /* should_initiate_conc_mark */ 2513 true, /* should_initiate_conc_mark */
2563 g1_policy()->max_pause_time_ms(), 2514 g1_policy()->max_pause_time_ms(),
2564 cause); 2515 cause);
2516 op.set_allocation_context(AllocationContext::current());
2565 2517
2566 VMThread::execute(&op); 2518 VMThread::execute(&op);
2567 if (!op.pause_succeeded()) { 2519 if (!op.pause_succeeded()) {
2568 if (old_marking_count_before == _old_marking_cycles_started) { 2520 if (old_marking_count_before == _old_marking_cycles_started) {
2569 retry_gc = op.should_retry_gc(); 2521 retry_gc = op.should_retry_gc();
2578 GC_locker::stall_until_clear(); 2530 GC_locker::stall_until_clear();
2579 } 2531 }
2580 } 2532 }
2581 } 2533 }
2582 } else { 2534 } else {
2583 if (cause == GCCause::_gc_locker 2535 if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2584 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { 2536 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2585 2537
2586 // Schedule a standard evacuation pause. We're setting word_size 2538 // Schedule a standard evacuation pause. We're setting word_size
2587 // to 0 which means that we are not requesting a post-GC allocation. 2539 // to 0 which means that we are not requesting a post-GC allocation.
2588 VM_G1IncCollectionPause op(gc_count_before, 2540 VM_G1IncCollectionPause op(gc_count_before,
2591 g1_policy()->max_pause_time_ms(), 2543 g1_policy()->max_pause_time_ms(),
2592 cause); 2544 cause);
2593 VMThread::execute(&op); 2545 VMThread::execute(&op);
2594 } else { 2546 } else {
2595 // Schedule a Full GC. 2547 // Schedule a Full GC.
2596 VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause); 2548 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2597 VMThread::execute(&op); 2549 VMThread::execute(&op);
2598 } 2550 }
2599 } 2551 }
2600 } while (retry_gc); 2552 } while (retry_gc);
2601 } 2553 }
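The loop above implements a check-then-retry protocol: the collection counters are sampled under Heap_lock, the VM operation is submitted with that sample, and the request is retried only if the pause was skipped and no other thread started the desired cycle in the meantime. A simplified standalone sketch of that pattern; the names below are illustrative and not the VM operation API:

#include <mutex>

struct Heap {
  std::mutex heap_lock;
  unsigned   old_marking_cycles_started = 0;

  // Pretend to submit the pause; here it is skipped, but "another thread"
  // starts the concurrent cycle anyway, which is exactly the case where no
  // retry is needed.
  bool submit_pause(unsigned /*count_at_request*/) {
    ++old_marking_cycles_started;
    return false;
  }
};

void request_concurrent_cycle(Heap& heap) {
  bool retry;
  do {
    retry = false;
    unsigned started_before;
    {
      // Sample the counter under the lock so the snapshot is consistent.
      std::lock_guard<std::mutex> guard(heap.heap_lock);
      started_before = heap.old_marking_cycles_started;
    }
    if (!heap.submit_pause(started_before)) {
      // Retry only if nobody started the cycle we asked for in the meantime.
      retry = (started_before == heap.old_marking_cycles_started);
    }
  } while (retry);
}

int main() {
  Heap heap;
  request_concurrent_cycle(heap);
  return heap.old_marking_cycles_started == 1 ? 0 : 1;
}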
2602 2554
2603 bool G1CollectedHeap::is_in(const void* p) const { 2555 bool G1CollectedHeap::is_in(const void* p) const {
2604 if (_g1_committed.contains(p)) { 2556 if (_hrm.reserved().contains(p)) {
2605 // Given that we know that p is in the committed space, 2557 // Given that we know that p is in the reserved space,
2606 // heap_region_containing_raw() should successfully 2558 // heap_region_containing_raw() should successfully
2607 // return the containing region. 2559 // return the containing region.
2608 HeapRegion* hr = heap_region_containing_raw(p); 2560 HeapRegion* hr = heap_region_containing_raw(p);
2609 return hr->is_in(p); 2561 return hr->is_in(p);
2610 } else { 2562 } else {
2611 return false; 2563 return false;
2612 } 2564 }
2613 } 2565 }
2614 2566
2567 #ifdef ASSERT
2568 bool G1CollectedHeap::is_in_exact(const void* p) const {
2569 bool contains = reserved_region().contains(p);
2570 bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2571 if (contains && available) {
2572 return true;
2573 } else {
2574 return false;
2575 }
2576 }
2577 #endif
2578
2615 // Iteration functions. 2579 // Iteration functions.
2616 2580
2617 // Iterates an OopClosure over all ref-containing fields of objects 2581 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2618 // within a HeapRegion.
2619 2582
2620 class IterateOopClosureRegionClosure: public HeapRegionClosure { 2583 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2621 MemRegion _mr;
2622 ExtendedOopClosure* _cl; 2584 ExtendedOopClosure* _cl;
2623 public: 2585 public:
2624 IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl) 2586 IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
2625 : _mr(mr), _cl(cl) {}
2626 bool doHeapRegion(HeapRegion* r) { 2587 bool doHeapRegion(HeapRegion* r) {
2627 if (!r->continuesHumongous()) { 2588 if (!r->continuesHumongous()) {
2628 r->oop_iterate(_cl); 2589 r->oop_iterate(_cl);
2629 } 2590 }
2630 return false; 2591 return false;
2631 } 2592 }
2632 }; 2593 };
2633 2594
2634 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) { 2595 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
2635 IterateOopClosureRegionClosure blk(_g1_committed, cl); 2596 IterateOopClosureRegionClosure blk(cl);
2636 heap_region_iterate(&blk);
2637 }
2638
2639 void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
2640 IterateOopClosureRegionClosure blk(mr, cl);
2641 heap_region_iterate(&blk); 2597 heap_region_iterate(&blk);
2642 } 2598 }
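As the closures above illustrate, per-region work in this file is expressed as a visitor whose doHeapRegion() returns false to keep iterating and true to abort the walk. A minimal standalone sketch of that shape, with made-up region and heap types standing in for HeapRegion and G1CollectedHeap:

#include <cstddef>
#include <vector>

// Simplified stand-ins for HeapRegion and HeapRegionClosure.
struct Region { size_t used_bytes; bool humongous_continuation; };

struct RegionClosure {
  // Return true to abort the iteration early, false to continue.
  virtual bool do_region(Region* r) = 0;
  virtual ~RegionClosure() {}
};

struct Heap {
  std::vector<Region> regions;
  void region_iterate(RegionClosure* cl) {
    for (Region& r : regions) {
      if (cl->do_region(&r)) break; // closure asked to abort
    }
  }
};

// Example closure: sum the used bytes of all non-continuation regions,
// mirroring how SumUsedClosure above skips "continues humongous" regions.
struct SumUsed : RegionClosure {
  size_t total = 0;
  bool do_region(Region* r) override {
    if (!r->humongous_continuation) total += r->used_bytes;
    return false; // never abort
  }
};

int main() {
  Heap heap;
  heap.regions = { {100, false}, {200, true}, {300, false} };
  SumUsed cl;
  heap.region_iterate(&cl);
  return cl.total == 400 ? 0 : 1;
}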
2643 2599
2644 // Iterates an ObjectClosure over all objects within a HeapRegion. 2600 // Iterates an ObjectClosure over all objects within a HeapRegion.
2645 2601
2676 SpaceClosureRegionClosure blk(cl); 2632 SpaceClosureRegionClosure blk(cl);
2677 heap_region_iterate(&blk); 2633 heap_region_iterate(&blk);
2678 } 2634 }
2679 2635
2680 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const { 2636 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2681 _hrs.iterate(cl); 2637 _hrm.iterate(cl);
2682 } 2638 }
2683 2639
2684 void 2640 void
2685 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, 2641 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2686 uint worker_id, 2642 uint worker_id,
2687 uint no_of_par_workers, 2643 uint num_workers,
2688 jint claim_value) { 2644 jint claim_value) const {
2689 const uint regions = n_regions(); 2645 _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
2690 const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
2691 no_of_par_workers :
2692 1);
2693 assert(UseDynamicNumberOfGCThreads ||
2694 no_of_par_workers == workers()->total_workers(),
2695 "Non dynamic should use fixed number of workers");
2696 // try to spread out the starting points of the workers
2697 const HeapRegion* start_hr =
2698 start_region_for_worker(worker_id, no_of_par_workers);
2699 const uint start_index = start_hr->hrs_index();
2700
2701 // each worker will actually look at all regions
2702 for (uint count = 0; count < regions; ++count) {
2703 const uint index = (start_index + count) % regions;
2704 assert(0 <= index && index < regions, "sanity");
2705 HeapRegion* r = region_at(index);
2706 // we'll ignore "continues humongous" regions (we'll process them
2707 // when we come across their corresponding "start humongous"
2708 // region) and regions already claimed
2709 if (r->claim_value() == claim_value || r->continuesHumongous()) {
2710 continue;
2711 }
2712 // OK, try to claim it
2713 if (r->claimHeapRegion(claim_value)) {
2714 // success!
2715 assert(!r->continuesHumongous(), "sanity");
2716 if (r->startsHumongous()) {
2717 // If the region is "starts humongous" we'll iterate over its
2718 // "continues humongous" first; in fact we'll do them
2719 // first. The order is important. In one case, calling the
2720 // closure on the "starts humongous" region might de-allocate
2721 // and clear all its "continues humongous" regions and, as a
2722 // result, we might end up processing them twice. So, we'll do
2723 // them first (notice: most closures will ignore them anyway) and
2724 // then we'll do the "starts humongous" region.
2725 for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
2726 HeapRegion* chr = region_at(ch_index);
2727
2728 // if the region has already been claimed or it's not
2729 // "continues humongous" we're done
2730 if (chr->claim_value() == claim_value ||
2731 !chr->continuesHumongous()) {
2732 break;
2733 }
2734
2735 // No one should have claimed it directly. We can assume that, given
2736 // that we claimed its "starts humongous" region.
2737 assert(chr->claim_value() != claim_value, "sanity");
2738 assert(chr->humongous_start_region() == r, "sanity");
2739
2740 if (chr->claimHeapRegion(claim_value)) {
2741 // we should always be able to claim it; no one else should
2742 // be trying to claim this region
2743
2744 bool res2 = cl->doHeapRegion(chr);
2745 assert(!res2, "Should not abort");
2746
2747 // Right now, this holds (i.e., no closure that actually
2748 // does something with "continues humongous" regions
2749 // clears them). We might have to weaken it in the future,
2750 // but let's leave these two asserts here for extra safety.
2751 assert(chr->continuesHumongous(), "should still be the case");
2752 assert(chr->humongous_start_region() == r, "sanity");
2753 } else {
2754 guarantee(false, "we should not reach here");
2755 }
2756 }
2757 }
2758
2759 assert(!r->continuesHumongous(), "sanity");
2760 bool res = cl->doHeapRegion(r);
2761 assert(!res, "Should not abort");
2762 }
2763 }
2764 } 2646 }
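The large block deleted above (now delegated to _hrm.par_iterate()) spread workers across the region array at different start offsets and had each worker claim regions with an atomic claim value so every region is processed exactly once. A simplified standalone sketch of that claiming idea, not the HeapRegionManager implementation:

#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

struct Region {
  std::atomic<unsigned> claim{0};
  // The claim succeeds for exactly one worker per claim round.
  bool try_claim(unsigned claim_value) {
    unsigned expected = claim.load();
    return expected != claim_value &&
           claim.compare_exchange_strong(expected, claim_value);
  }
};

int main() {
  const size_t   num_regions = 64;
  const unsigned num_workers = 4;
  const unsigned claim_value = 1;

  std::vector<Region> regions(num_regions);
  std::atomic<size_t> processed{0};

  auto worker = [&](unsigned worker_id) {
    // Spread out start points so workers do not contend on the same regions.
    size_t start = num_regions * worker_id / num_workers;
    for (size_t count = 0; count < num_regions; ++count) {
      size_t index = (start + count) % num_regions;
      if (regions[index].try_claim(claim_value)) {
        ++processed; // the doHeapRegion() call would happen here
      }
    }
  };

  std::vector<std::thread> threads;
  for (unsigned i = 0; i < num_workers; ++i) threads.emplace_back(worker, i);
  for (auto& t : threads) t.join();

  return processed == num_regions ? 0 : 1; // each region processed exactly once
}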
2765 2647
2766 class ResetClaimValuesClosure: public HeapRegionClosure { 2648 class ResetClaimValuesClosure: public HeapRegionClosure {
2767 public: 2649 public:
2768 bool doHeapRegion(HeapRegion* r) { 2650 bool doHeapRegion(HeapRegion* r) {
2936 OrderAccess::storestore(); 2818 OrderAccess::storestore();
2937 _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp; 2819 _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2938 return result; 2820 return result;
2939 } 2821 }
2940 2822
2941 HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
2942 uint no_of_par_workers) {
2943 uint worker_num =
2944 G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
2945 assert(UseDynamicNumberOfGCThreads ||
2946 no_of_par_workers == workers()->total_workers(),
2947 "Non dynamic should use fixed number of workers");
2948 const uint start_index = n_regions() * worker_i / worker_num;
2949 return region_at(start_index);
2950 }
2951
2952 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { 2823 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2953 HeapRegion* r = g1_policy()->collection_set(); 2824 HeapRegion* r = g1_policy()->collection_set();
2954 while (r != NULL) { 2825 while (r != NULL) {
2955 HeapRegion* next = r->next_in_collection_set(); 2826 HeapRegion* next = r->next_in_collection_set();
2956 if (cl->doHeapRegion(r)) { 2827 if (cl->doHeapRegion(r)) {
2988 } 2859 }
2989 cur = next; 2860 cur = next;
2990 } 2861 }
2991 } 2862 }
2992 2863
2993 CompactibleSpace* G1CollectedHeap::first_compactible_space() { 2864 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2994 return n_regions() > 0 ? region_at(0) : NULL; 2865 HeapRegion* result = _hrm.next_region_in_heap(from);
2995 } 2866 while (result != NULL && result->isHumongous()) {
2996 2867 result = _hrm.next_region_in_heap(result);
2868 }
2869 return result;
2870 }
2997 2871
2998 Space* G1CollectedHeap::space_containing(const void* addr) const { 2872 Space* G1CollectedHeap::space_containing(const void* addr) const {
2999 Space* res = heap_region_containing(addr); 2873 return heap_region_containing(addr);
3000 return res;
3001 } 2874 }
3002 2875
3003 HeapWord* G1CollectedHeap::block_start(const void* addr) const { 2876 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
3004 Space* sp = space_containing(addr); 2877 Space* sp = space_containing(addr);
3005 if (sp != NULL) { 2878 return sp->block_start(addr);
3006 return sp->block_start(addr);
3007 }
3008 return NULL;
3009 } 2879 }
3010 2880
3011 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { 2881 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
3012 Space* sp = space_containing(addr); 2882 Space* sp = space_containing(addr);
3013 assert(sp != NULL, "block_size of address outside of heap");
3014 return sp->block_size(addr); 2883 return sp->block_size(addr);
3015 } 2884 }
3016 2885
3017 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { 2886 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
3018 Space* sp = space_containing(addr); 2887 Space* sp = space_containing(addr);
3043 2912
3044 // Also, this value can be at most the humongous object threshold, 2913 // Also, this value can be at most the humongous object threshold,
3045 // since we can't allow tlabs to grow big enough to accommodate 2914 // since we can't allow tlabs to grow big enough to accommodate
3046 // humongous objects. 2915 // humongous objects.
3047 2916
3048 HeapRegion* hr = _mutator_alloc_region.get(); 2917 HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
3049 size_t max_tlab = max_tlab_size() * wordSize; 2918 size_t max_tlab = max_tlab_size() * wordSize;
3050 if (hr == NULL) { 2919 if (hr == NULL) {
3051 return max_tlab; 2920 return max_tlab;
3052 } else { 2921 } else {
3053 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab); 2922 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
3054 } 2923 }
3055 } 2924 }
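The return value above is simply the free space of the current mutator region clamped between MinTLABSize and the humongous-bounded maximum TLAB size. A one-screen illustration with assumed example values:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t min_tlab  = 2 * 1024;   // assumed MinTLABSize
  const size_t max_tlab  = 512 * 1024; // assumed maximum TLAB size
  size_t region_free     = 1 * 1024;   // free bytes left in the current region

  // MIN2(MAX2(free, MinTLABSize), max_tlab) from the source, spelled with std::.
  size_t tlab = std::min(std::max(region_free, min_tlab), max_tlab);
  std::printf("suggested TLAB size = %zu bytes\n", tlab); // 2048 here
  return 0;
}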
3056 2925
3057 size_t G1CollectedHeap::max_capacity() const { 2926 size_t G1CollectedHeap::max_capacity() const {
3058 return _g1_reserved.byte_size(); 2927 return _hrm.reserved().byte_size();
3059 } 2928 }
3060 2929
3061 jlong G1CollectedHeap::millis_since_last_gc() { 2930 jlong G1CollectedHeap::millis_since_last_gc() {
3062 // assert(false, "NYI"); 2931 // assert(false, "NYI");
3063 return 0; 2932 return 0;
3422 assert(Thread::current()->is_VM_thread(), 3291 assert(Thread::current()->is_VM_thread(),
3423 "Expected to be executed serially by the VM thread at this point"); 3292 "Expected to be executed serially by the VM thread at this point");
3424 3293
3425 if (!silent) { gclog_or_tty->print("Roots "); } 3294 if (!silent) { gclog_or_tty->print("Roots "); }
3426 VerifyRootsClosure rootsCl(vo); 3295 VerifyRootsClosure rootsCl(vo);
3296 VerifyKlassClosure klassCl(this, &rootsCl);
3297 CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3298
3299 // We apply the relevant closures to all the oops in the
3300 // system dictionary, class loader data graph, the string table
3301 // and the nmethods in the code cache.
3427 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo); 3302 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3428 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl); 3303 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3429 VerifyKlassClosure klassCl(this, &rootsCl); 3304
3430 3305 process_all_roots(true, // activate StrongRootsScope
3431 // We apply the relevant closures to all the oops in the 3306 SO_AllCodeCache, // roots scanning options
3432 // system dictionary, the string table and the code cache. 3307 &rootsCl,
3433 const int so = SO_AllClasses | SO_Strings | SO_CodeCache; 3308 &cldCl,
3434 3309 &blobsCl);
3435 // Need cleared claim bits for the strong roots processing
3436 ClassLoaderDataGraph::clear_claimed_marks();
3437
3438 process_strong_roots(true, // activate StrongRootsScope
3439 false, // we set "is scavenging" to false,
3440 // so we don't reset the dirty cards.
3441 ScanningOption(so), // roots scanning options
3442 &rootsCl,
3443 &blobsCl,
3444 &klassCl
3445 );
3446 3310
3447 bool failures = rootsCl.failures() || codeRootsCl.failures(); 3311 bool failures = rootsCl.failures() || codeRootsCl.failures();
3448 3312
3449 if (vo != VerifyOption_G1UseMarkWord) { 3313 if (vo != VerifyOption_G1UseMarkWord) {
3450 // If we're verifying during a full GC then the region sets 3314 // If we're verifying during a full GC then the region sets
3587 void G1CollectedHeap::print_on(outputStream* st) const { 3451 void G1CollectedHeap::print_on(outputStream* st) const {
3588 st->print(" %-20s", "garbage-first heap"); 3452 st->print(" %-20s", "garbage-first heap");
3589 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", 3453 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3590 capacity()/K, used_unlocked()/K); 3454 capacity()/K, used_unlocked()/K);
3591 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", 3455 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3592 _g1_storage.low_boundary(), 3456 _hrm.reserved().start(),
3593 _g1_storage.high(), 3457 _hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords,
3594 _g1_storage.high_boundary()); 3458 _hrm.reserved().end());
3595 st->cr(); 3459 st->cr();
3596 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K); 3460 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3597 uint young_regions = _young_list->length(); 3461 uint young_regions = _young_list->length();
3598 st->print("%u young (" SIZE_FORMAT "K), ", young_regions, 3462 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3599 (size_t) young_regions * HeapRegion::GrainBytes / K); 3463 (size_t) young_regions * HeapRegion::GrainBytes / K);
3735 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { 3599 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3736 g1_rem_set()->print_periodic_summary_info("Before GC RS summary"); 3600 g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3737 } 3601 }
3738 } 3602 }
3739 3603
3740 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { 3604 void G1CollectedHeap::gc_epilogue(bool full) {
3741 3605
3742 if (G1SummarizeRSetStats && 3606 if (G1SummarizeRSetStats &&
3743 (G1SummarizeRSetStatsPeriod > 0) && 3607 (G1SummarizeRSetStatsPeriod > 0) &&
3744 // we are at the end of the GC. Total collections has already been increased. 3608 // we are at the end of the GC. Total collections has already been increased.
3745 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) { 3609 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3752 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), 3616 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3753 "derived pointer present")); 3617 "derived pointer present"));
3754 // always_do_update_barrier = true; 3618 // always_do_update_barrier = true;
3755 3619
3756 resize_all_tlabs(); 3620 resize_all_tlabs();
3621 allocation_context_stats().update(full);
3757 3622
3758 // We have just completed a GC. Update the soft reference 3623 // We have just completed a GC. Update the soft reference
3759 // policy with the new heap occupancy 3624 // policy with the new heap occupancy
3760 Universe::update_heap_info_at_gc(); 3625 Universe::update_heap_info_at_gc();
3761 } 3626 }
3769 VM_G1IncCollectionPause op(gc_count_before, 3634 VM_G1IncCollectionPause op(gc_count_before,
3770 word_size, 3635 word_size,
3771 false, /* should_initiate_conc_mark */ 3636 false, /* should_initiate_conc_mark */
3772 g1_policy()->max_pause_time_ms(), 3637 g1_policy()->max_pause_time_ms(),
3773 gc_cause); 3638 gc_cause);
3639
3640 op.set_allocation_context(AllocationContext::current());
3774 VMThread::execute(&op); 3641 VMThread::execute(&op);
3775 3642
3776 HeapWord* result = op.result(); 3643 HeapWord* result = op.result();
3777 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded(); 3644 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3778 assert(result == NULL || ret_succeeded, 3645 assert(result == NULL || ret_succeeded,
3810 return (buffer_size * buffer_num + extra_cards) / oopSize; 3677 return (buffer_size * buffer_num + extra_cards) / oopSize;
3811 } 3678 }
3812 3679
3813 size_t G1CollectedHeap::cards_scanned() { 3680 size_t G1CollectedHeap::cards_scanned() {
3814 return g1_rem_set()->cardsScanned(); 3681 return g1_rem_set()->cardsScanned();
3682 }
3683
3684 bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
3685 HeapRegion* region = region_at(index);
3686 assert(region->startsHumongous(), "Must start a humongous object");
3687 return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
3688 }
3689
3690 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3691 private:
3692 size_t _total_humongous;
3693 size_t _candidate_humongous;
3694 public:
3695 RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
3696 }
3697
3698 virtual bool doHeapRegion(HeapRegion* r) {
3699 if (!r->startsHumongous()) {
3700 return false;
3701 }
3702 G1CollectedHeap* g1h = G1CollectedHeap::heap();
3703
3704 uint region_idx = r->hrm_index();
3705 bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
3706 // Is_candidate already filters out humongous regions with some remembered set.
3707 // This will not lead to a humongous object that we mistakenly keep alive because
3708 // during young collection the remembered sets will only be added to.
3709 if (is_candidate) {
3710 g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
3711 _candidate_humongous++;
3712 }
3713 _total_humongous++;
3714
3715 return false;
3716 }
3717
3718 size_t total_humongous() const { return _total_humongous; }
3719 size_t candidate_humongous() const { return _candidate_humongous; }
3720 };
3721
3722 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
3723 if (!G1ReclaimDeadHumongousObjectsAtYoungGC) {
3724 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0);
3725 return;
3726 }
3727
3728 RegisterHumongousWithInCSetFastTestClosure cl;
3729 heap_region_iterate(&cl);
3730 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(),
3731 cl.candidate_humongous());
3732 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3733
3734 if (_has_humongous_reclaim_candidates) {
3735 clear_humongous_is_live_table();
3736 }
3815 } 3737 }
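The two routines above decide, at the start of an evacuation pause, which humongous regions are worth tracking for eager reclaim: a region qualifies only if its object is not an object array and its remembered set is empty; an objArray or a non-empty remembered set marks it "always live" here. A standalone sketch of that predicate, with illustrative types and names:

#include <cstdio>

struct HumongousRegion {
  bool object_is_obj_array; // the humongous object is an object array
  bool remset_is_empty;     // no remembered-set entries point into the region
};

// Mirrors humongous_region_is_always_live() above: an objArray, or any region
// already referenced through its remembered set, is never an eager-reclaim
// candidate during a young collection.
static bool is_always_live(const HumongousRegion& r) {
  return r.object_is_obj_array || !r.remset_is_empty;
}

static bool is_reclaim_candidate(const HumongousRegion& r) {
  return !is_always_live(r);
}

int main() {
  HumongousRegion primitive_array = { false, true };
  HumongousRegion referenced      = { false, false };
  HumongousRegion obj_array       = { true,  true };

  std::printf("non-objArray, empty remset    : candidate = %d\n",
              is_reclaim_candidate(primitive_array)); // 1
  std::printf("non-objArray, live remset     : candidate = %d\n",
              is_reclaim_candidate(referenced));      // 0
  std::printf("object array                  : candidate = %d\n",
              is_reclaim_candidate(obj_array));       // 0
  return 0;
}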
3816 3738
3817 void 3739 void
3818 G1CollectedHeap::setup_surviving_young_words() { 3740 G1CollectedHeap::setup_surviving_young_words() {
3819 assert(_surviving_young_words == NULL, "pre-condition"); 3741 assert(_surviving_young_words == NULL, "pre-condition");
3901 void G1CollectedHeap::log_gc_header() { 3823 void G1CollectedHeap::log_gc_header() {
3902 if (!G1Log::fine()) { 3824 if (!G1Log::fine()) {
3903 return; 3825 return;
3904 } 3826 }
3905 3827
3906 gclog_or_tty->date_stamp(PrintGCDateStamps); 3828 gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3907 gclog_or_tty->stamp(PrintGCTimeStamps);
3908 3829
3909 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause()) 3830 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3910 .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)") 3831 .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3911 .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : ""); 3832 .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
3912 3833
4023 gc_prologue(false); 3944 gc_prologue(false);
4024 increment_total_collections(false /* full gc */); 3945 increment_total_collections(false /* full gc */);
4025 increment_gc_time_stamp(); 3946 increment_gc_time_stamp();
4026 3947
4027 verify_before_gc(); 3948 verify_before_gc();
3949 check_bitmaps("GC Start");
4028 3950
4029 COMPILER2_PRESENT(DerivedPointerTable::clear()); 3951 COMPILER2_PRESENT(DerivedPointerTable::clear());
4030 3952
4031 // Please see comment in g1CollectedHeap.hpp and 3953 // Please see comment in g1CollectedHeap.hpp and
4032 // G1CollectedHeap::ref_processing_init() to see how 3954 // G1CollectedHeap::ref_processing_init() to see how
4043 // NoRefDiscovery object will do this. 3965 // NoRefDiscovery object will do this.
4044 NoRefDiscovery no_cm_discovery(ref_processor_cm()); 3966 NoRefDiscovery no_cm_discovery(ref_processor_cm());
4045 3967
4046 // Forget the current alloc region (we might even choose it to be part 3968 // Forget the current alloc region (we might even choose it to be part
4047 // of the collection set!). 3969 // of the collection set!).
4048 release_mutator_alloc_region(); 3970 _allocator->release_mutator_alloc_region();
4049 3971
4050 // We should call this after we retire the mutator alloc 3972 // We should call this after we retire the mutator alloc
4051 // region(s) so that all the ALLOC / RETIRE events are generated 3973 // region(s) so that all the ALLOC / RETIRE events are generated
4052 // before the start GC event. 3974 // before the start GC event.
4053 _hr_printer.start_gc(false /* full */, (size_t) total_collections()); 3975 _hr_printer.start_gc(false /* full */, (size_t) total_collections());
4098 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); 4020 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4099 #endif // YOUNG_LIST_VERBOSE 4021 #endif // YOUNG_LIST_VERBOSE
4100 4022
4101 g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info); 4023 g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
4102 4024
4025 register_humongous_regions_with_in_cset_fast_test();
4026
4103 _cm->note_start_of_gc(); 4027 _cm->note_start_of_gc();
4104 // We should not verify the per-thread SATB buffers given that 4028 // We should not verify the per-thread SATB buffers given that
4105 // we have not filtered them yet (we'll do so during the 4029 // we have not filtered them yet (we'll do so during the
4106 // GC). We also call this after finalize_cset() to 4030 // GC). We also call this after finalize_cset() to
4107 // ensure that the CSet has been finalized. 4031 // ensure that the CSet has been finalized.
4111 true /* verify_fingers */); 4035 true /* verify_fingers */);
4112 4036
4113 if (_hr_printer.is_active()) { 4037 if (_hr_printer.is_active()) {
4114 HeapRegion* hr = g1_policy()->collection_set(); 4038 HeapRegion* hr = g1_policy()->collection_set();
4115 while (hr != NULL) { 4039 while (hr != NULL) {
4116 G1HRPrinter::RegionType type;
4117 if (!hr->is_young()) {
4118 type = G1HRPrinter::Old;
4119 } else if (hr->is_survivor()) {
4120 type = G1HRPrinter::Survivor;
4121 } else {
4122 type = G1HRPrinter::Eden;
4123 }
4124 _hr_printer.cset(hr); 4040 _hr_printer.cset(hr);
4125 hr = hr->next_in_collection_set(); 4041 hr = hr->next_in_collection_set();
4126 } 4042 }
4127 } 4043 }
4128 4044
4132 #endif // ASSERT 4048 #endif // ASSERT
4133 4049
4134 setup_surviving_young_words(); 4050 setup_surviving_young_words();
4135 4051
4136 // Initialize the GC alloc regions. 4052 // Initialize the GC alloc regions.
4137 init_gc_alloc_regions(evacuation_info); 4053 _allocator->init_gc_alloc_regions(evacuation_info);
4138 4054
4139 // Actually do the work... 4055 // Actually do the work...
4140 evacuate_collection_set(evacuation_info); 4056 evacuate_collection_set(evacuation_info);
4141 4057
4142 // We do this to mainly verify the per-thread SATB buffers 4058 // We do this to mainly verify the per-thread SATB buffers
4148 false /* verify_enqueued_buffers */, 4064 false /* verify_enqueued_buffers */,
4149 true /* verify_thread_buffers */, 4065 true /* verify_thread_buffers */,
4150 true /* verify_fingers */); 4066 true /* verify_fingers */);
4151 4067
4152 free_collection_set(g1_policy()->collection_set(), evacuation_info); 4068 free_collection_set(g1_policy()->collection_set(), evacuation_info);
4069
4070 eagerly_reclaim_humongous_regions();
4071
4153 g1_policy()->clear_collection_set(); 4072 g1_policy()->clear_collection_set();
4154 4073
4155 cleanup_surviving_young_words(); 4074 cleanup_surviving_young_words();
4156 4075
4157 // Start a new incremental collection set for the next pause. 4076 // Start a new incremental collection set for the next pause.
4158 g1_policy()->start_incremental_cset_building(); 4077 g1_policy()->start_incremental_cset_building();
4159 4078
4160 // Clear the _cset_fast_test bitmap in anticipation of adding
4161 // regions to the incremental collection set for the next
4162 // evacuation pause.
4163 clear_cset_fast_test(); 4079 clear_cset_fast_test();
4164 4080
4165 _young_list->reset_sampled_info(); 4081 _young_list->reset_sampled_info();
4166 4082
4167 // Don't check the whole heap at this point as the 4083 // Don't check the whole heap at this point as the
4181 _young_list->last_survivor_region()); 4097 _young_list->last_survivor_region());
4182 4098
4183 _young_list->reset_auxilary_lists(); 4099 _young_list->reset_auxilary_lists();
4184 4100
4185 if (evacuation_failed()) { 4101 if (evacuation_failed()) {
4186 _summary_bytes_used = recalculate_used(); 4102 _allocator->set_used(recalculate_used());
4187 uint n_queues = MAX2((int)ParallelGCThreads, 1); 4103 uint n_queues = MAX2((int)ParallelGCThreads, 1);
4188 for (uint i = 0; i < n_queues; i++) { 4104 for (uint i = 0; i < n_queues; i++) {
4189 if (_evacuation_failed_info_array[i].has_failed()) { 4105 if (_evacuation_failed_info_array[i].has_failed()) {
4190 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); 4106 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
4191 } 4107 }
4192 } 4108 }
4193 } else { 4109 } else {
4194 // The "used" of the collection set has already been subtracted 4110 // The "used" of the collection set has already been subtracted
4195 // when its regions were freed. Add in the bytes evacuated. 4111 // when its regions were freed. Add in the bytes evacuated.
4196 _summary_bytes_used += g1_policy()->bytes_copied_during_gc(); 4112 _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
4197 } 4113 }
4198 4114
4199 if (g1_policy()->during_initial_mark_pause()) { 4115 if (g1_policy()->during_initial_mark_pause()) {
4200 // We have to do this before we notify the CM threads that 4116 // We have to do this before we notify the CM threads that
4201 // they can start working to make sure that all the 4117 // they can start working to make sure that all the
4213 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); 4129 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
4214 _young_list->print(); 4130 _young_list->print();
4215 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); 4131 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4216 #endif // YOUNG_LIST_VERBOSE 4132 #endif // YOUNG_LIST_VERBOSE
4217 4133
4218 init_mutator_alloc_region(); 4134 _allocator->init_mutator_alloc_region();
4219 4135
4220 { 4136 {
4221 size_t expand_bytes = g1_policy()->expansion_amount(); 4137 size_t expand_bytes = g1_policy()->expansion_amount();
4222 if (expand_bytes > 0) { 4138 if (expand_bytes > 0) {
4223 size_t bytes_before = capacity(); 4139 size_t bytes_before = capacity();
4224 // No need for an ergo verbose message here, 4140 // No need for an ergo verbose message here,
4225 // expansion_amount() does this when it returns a value > 0. 4141 // expansion_amount() does this when it returns a value > 0.
4226 if (!expand(expand_bytes)) { 4142 if (!expand(expand_bytes)) {
4227 // We failed to expand the heap so let's verify that 4143 // We failed to expand the heap. Cannot do anything about it.
4228 // committed/uncommitted amount match the backing store
4229 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
4230 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
4231 } 4144 }
4232 } 4145 }
4233 } 4146 }
4234 4147
4235 // We redo the verification but now wrt to the new CSet which 4148 // We redo the verification but now wrt to the new CSet which
4271 // is_gc_active() check to decide which top to use when 4184 // is_gc_active() check to decide which top to use when
4272 // scanning cards (see CR 7039627). 4185 // scanning cards (see CR 7039627).
4273 increment_gc_time_stamp(); 4186 increment_gc_time_stamp();
4274 4187
4275 verify_after_gc(); 4188 verify_after_gc();
4189 check_bitmaps("GC End");
4276 4190
4277 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); 4191 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4278 ref_processor_stw()->verify_no_references_recorded(); 4192 ref_processor_stw()->verify_no_references_recorded();
4279 4193
4280 // CM reference discovery will be re-enabled if necessary. 4194 // CM reference discovery will be re-enabled if necessary.
4284 // that all the COMMIT events are generated before the end GC 4198 // that all the COMMIT events are generated before the end GC
4285 // event, and after we retire the GC alloc regions so that all 4199 // event, and after we retire the GC alloc regions so that all
4286 // RETIRE events are generated before the end GC event. 4200 // RETIRE events are generated before the end GC event.
4287 _hr_printer.end_gc(false /* full */, (size_t) total_collections()); 4201 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4288 4202
4289 if (mark_in_progress()) {
4290 concurrent_mark()->update_g1_committed();
4291 }
4292
4293 #ifdef TRACESPINNING 4203 #ifdef TRACESPINNING
4294 ParallelTaskTerminator::print_termination_counts(); 4204 ParallelTaskTerminator::print_termination_counts();
4295 #endif 4205 #endif
4296 4206
4297 gc_epilogue(false); 4207 gc_epilogue(false);
4303 // It is not yet safe to tell the concurrent mark to 4213 // It is not yet safe to tell the concurrent mark to
4304 // start as we have some optional output below. We don't want the 4214 // start as we have some optional output below. We don't want the
4305 // output from the concurrent mark thread interfering with this 4215 // output from the concurrent mark thread interfering with this
4306 // logging output either. 4216 // logging output either.
4307 4217
4308 _hrs.verify_optional(); 4218 _hrm.verify_optional();
4309 verify_region_sets_optional(); 4219 verify_region_sets_optional();
4310 4220
4311 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); 4221 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4312 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); 4222 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4313 4223
4334 // the concurrent marking thread(s) could be running 4244 // the concurrent marking thread(s) could be running
4335 // concurrently with us. Make sure that anything after 4245 // concurrently with us. Make sure that anything after
4336 // this point does not assume that we are the only GC thread 4246 // this point does not assume that we are the only GC thread
4337 // running. Note: of course, the actual marking work will 4247 // running. Note: of course, the actual marking work will
4338 // not start until the safepoint itself is released in 4248 // not start until the safepoint itself is released in
4339 // ConcurrentGCThread::safepoint_desynchronize(). 4249 // SuspendibleThreadSet::desynchronize().
4340 doConcurrentMark(); 4250 doConcurrentMark();
4341 } 4251 }
4342 4252
4343 return true; 4253 return true;
4344 } 4254 }
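The used-bytes bookkeeping near the end of the pause above reduces to two cases: after an evacuation failure the running total is recomputed from the regions, otherwise the bytes copied out of the collection set are added back to a total from which the freed CSet regions were already subtracted. A minimal stand-alone sketch of that arithmetic (function and parameter names are assumptions, not G1 APIs):

#include <cstddef>

// Sketch only: mirrors the two accounting paths, not the real G1 bookkeeping.
static size_t used_after_pause(bool   evacuation_failed,
                               size_t recalculated_used, // answer from a full walk over the regions
                               size_t used_before,       // running total; CSet regions already subtracted
                               size_t bytes_copied) {    // survivor bytes copied during the pause
  if (evacuation_failed) {
    // The running total cannot be trusted after a failed evacuation; recompute it.
    return recalculated_used;
  }
  // The freed CSet regions were subtracted when they were freed; add the evacuated bytes.
  return used_before + bytes_copied;
}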
4362 // Prevent humongous PLAB sizes for two reasons: 4272 // Prevent humongous PLAB sizes for two reasons:
4363 // * PLABs are allocated using similar paths to oops, but should 4273 // * PLABs are allocated using similar paths to oops, but should
4364 // never be in a humongous region 4274 // never be in a humongous region
4365 // * Allowing humongous PLABs needlessly churns the region free lists 4275 // * Allowing humongous PLABs needlessly churns the region free lists
4366 return MIN2(_humongous_object_threshold_in_words, gclab_word_size); 4276 return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
4367 }
4368
4369 void G1CollectedHeap::init_mutator_alloc_region() {
4370 assert(_mutator_alloc_region.get() == NULL, "pre-condition");
4371 _mutator_alloc_region.init();
4372 }
4373
4374 void G1CollectedHeap::release_mutator_alloc_region() {
4375 _mutator_alloc_region.release();
4376 assert(_mutator_alloc_region.get() == NULL, "post-condition");
4377 }
4378
4379 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
4380 assert_at_safepoint(true /* should_be_vm_thread */);
4381
4382 _survivor_gc_alloc_region.init();
4383 _old_gc_alloc_region.init();
4384 HeapRegion* retained_region = _retained_old_gc_alloc_region;
4385 _retained_old_gc_alloc_region = NULL;
4386
4387 // We will discard the current GC alloc region if:
4388 // a) it's in the collection set (it can happen!),
4389 // b) it's already full (no point in using it),
4390 // c) it's empty (this means that it was emptied during
4391 // a cleanup and it should be on the free list now), or
4392 // d) it's humongous (this means that it was emptied
4393 // during a cleanup and was added to the free list, but
4394 // has been subsequently used to allocate a humongous
4395 // object that may be less than the region size).
4396 if (retained_region != NULL &&
4397 !retained_region->in_collection_set() &&
4398 !(retained_region->top() == retained_region->end()) &&
4399 !retained_region->is_empty() &&
4400 !retained_region->isHumongous()) {
4401 retained_region->set_saved_mark();
4402 // The retained region was added to the old region set when it was
4403 // retired. We have to remove it now, since we don't allow regions
4404 // we allocate to in the region sets. We'll re-add it later, when
4405 // it's retired again.
4406 _old_set.remove(retained_region);
4407 bool during_im = g1_policy()->during_initial_mark_pause();
4408 retained_region->note_start_of_copying(during_im);
4409 _old_gc_alloc_region.set(retained_region);
4410 _hr_printer.reuse(retained_region);
4411 evacuation_info.set_alloc_regions_used_before(retained_region->used());
4412 }
4413 }
4414
4415 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
4416 evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
4417 _old_gc_alloc_region.count());
4418 _survivor_gc_alloc_region.release();
4419 // If we have an old GC alloc region to release, we'll save it in
4420 // _retained_old_gc_alloc_region. If we don't
4421 // _retained_old_gc_alloc_region will become NULL. This is what we
4422 // want either way so no reason to check explicitly for either
4423 // condition.
4424 _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
4425
4426 if (ResizePLAB) {
4427 _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4428 _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4429 }
4430 }
4431
4432 void G1CollectedHeap::abandon_gc_alloc_regions() {
4433 assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
4434 assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
4435 _retained_old_gc_alloc_region = NULL;
4436 } 4277 }
4437 4278
4438 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { 4279 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4439 _drain_in_progress = false; 4280 _drain_in_progress = false;
4440 set_evac_failure_closure(cl); 4281 set_evac_failure_closure(cl);
4573 _preserved_marks_of_objs.push(m); 4414 _preserved_marks_of_objs.push(m);
4574 } 4415 }
4575 } 4416 }
4576 4417
4577 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, 4418 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4578 size_t word_size) { 4419 size_t word_size,
4420 AllocationContext_t context) {
4579 if (purpose == GCAllocForSurvived) { 4421 if (purpose == GCAllocForSurvived) {
4580 HeapWord* result = survivor_attempt_allocation(word_size); 4422 HeapWord* result = survivor_attempt_allocation(word_size, context);
4581 if (result != NULL) { 4423 if (result != NULL) {
4582 return result; 4424 return result;
4583 } else { 4425 } else {
4584 // Let's try to allocate in the old gen in case we can fit the 4426 // Let's try to allocate in the old gen in case we can fit the
4585 // object there. 4427 // object there.
4586 return old_attempt_allocation(word_size); 4428 return old_attempt_allocation(word_size, context);
4587 } 4429 }
4588 } else { 4430 } else {
4589 assert(purpose == GCAllocForTenured, "sanity"); 4431 assert(purpose == GCAllocForTenured, "sanity");
4590 HeapWord* result = old_attempt_allocation(word_size); 4432 HeapWord* result = old_attempt_allocation(word_size, context);
4591 if (result != NULL) { 4433 if (result != NULL) {
4592 return result; 4434 return result;
4593 } else { 4435 } else {
4594 // Let's try to allocate in the survivors in case we can fit the 4436 // Let's try to allocate in the survivors in case we can fit the
4595 // object there. 4437 // object there.
4596 return survivor_attempt_allocation(word_size); 4438 return survivor_attempt_allocation(word_size, context);
4597 } 4439 }
4598 } 4440 }
4599 4441
4600 ShouldNotReachHere(); 4442 ShouldNotReachHere();
4601 // Trying to keep some compilers happy. 4443 // Trying to keep some compilers happy.
4602 return NULL; 4444 return NULL;
4603 } 4445 }
4604 4446
4605 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4606 ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4607
4608 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
4609 : _g1h(g1h),
4610 _refs(g1h->task_queue(queue_num)),
4611 _dcq(&g1h->dirty_card_queue_set()),
4612 _ct_bs(g1h->g1_barrier_set()),
4613 _g1_rem(g1h->g1_rem_set()),
4614 _hash_seed(17), _queue_num(queue_num),
4615 _term_attempts(0),
4616 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4617 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4618 _age_table(false), _scanner(g1h, this, rp),
4619 _strong_roots_time(0), _term_time(0),
4620 _alloc_buffer_waste(0), _undo_waste(0) {
4621 // we allocate G1YoungSurvRateNumRegions plus one entries, since
4622 // we "sacrifice" entry 0 to keep track of surviving bytes for
4623 // non-young regions (where the age is -1)
4624 // We also add a few elements at the beginning and at the end in
4625 // an attempt to eliminate cache contention
4626 uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4627 uint array_length = PADDING_ELEM_NUM +
4628 real_length +
4629 PADDING_ELEM_NUM;
4630 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
4631 if (_surviving_young_words_base == NULL)
4632 vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
4633 "Not enough space for young surv histo.");
4634 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4635 memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4636
4637 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4638 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
4639
4640 _start = os::elapsedTime();
4641 }
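The padding comment in the (removed) G1ParScanThreadState constructor above is about false sharing: per-worker counters that end up on the same cache line ping-pong between CPUs. A stand-alone sketch of the same layout trick, assuming a 64-byte cache line (PAD_ELEMS and alloc_padded_counters are illustrative names, not HotSpot APIs):

#include <cstddef>
#include <cstdlib>
#include <cstring>

static const size_t PAD_ELEMS = 64 / sizeof(size_t); // roughly one cache line of size_t slots

// Returns a zeroed array of 'real_length' counters with dummy padding on both
// sides, so neighbouring allocations are less likely to share a cache line
// with the useful entries. Callers must free(ptr - PAD_ELEMS).
static size_t* alloc_padded_counters(size_t real_length) {
  size_t  total = PAD_ELEMS + real_length + PAD_ELEMS;
  size_t* base  = static_cast<size_t*>(::malloc(total * sizeof(size_t)));
  if (base == NULL) {
    return NULL;                        // caller decides how to handle allocation failure
  }
  ::memset(base, 0, total * sizeof(size_t));
  return base + PAD_ELEMS;              // hand back the view without the leading padding
}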
4642
4643 void
4644 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
4645 {
4646 st->print_raw_cr("GC Termination Stats");
4647 st->print_raw_cr(" elapsed --strong roots-- -------termination-------"
4648 " ------waste (KiB)------");
4649 st->print_raw_cr("thr ms ms % ms % attempts"
4650 " total alloc undo");
4651 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
4652 " ------- ------- -------");
4653 }
4654
4655 void
4656 G1ParScanThreadState::print_termination_stats(int i,
4657 outputStream* const st) const
4658 {
4659 const double elapsed_ms = elapsed_time() * 1000.0;
4660 const double s_roots_ms = strong_roots_time() * 1000.0;
4661 const double term_ms = term_time() * 1000.0;
4662 st->print_cr("%3d %9.2f %9.2f %6.2f "
4663 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4664 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4665 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
4666 term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
4667 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
4668 alloc_buffer_waste() * HeapWordSize / K,
4669 undo_waste() * HeapWordSize / K);
4670 }
4671
4672 #ifdef ASSERT
4673 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
4674 assert(ref != NULL, "invariant");
4675 assert(UseCompressedOops, "sanity");
4676 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
4677 oop p = oopDesc::load_decode_heap_oop(ref);
4678 assert(_g1h->is_in_g1_reserved(p),
4679 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4680 return true;
4681 }
4682
4683 bool G1ParScanThreadState::verify_ref(oop* ref) const {
4684 assert(ref != NULL, "invariant");
4685 if (has_partial_array_mask(ref)) {
4686 // Must be in the collection set--it's already been copied.
4687 oop p = clear_partial_array_mask(ref);
4688 assert(_g1h->obj_in_cs(p),
4689 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4690 } else {
4691 oop p = oopDesc::load_decode_heap_oop(ref);
4692 assert(_g1h->is_in_g1_reserved(p),
4693 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4694 }
4695 return true;
4696 }
4697
4698 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4699 if (ref.is_narrow()) {
4700 return verify_ref((narrowOop*) ref);
4701 } else {
4702 return verify_ref((oop*) ref);
4703 }
4704 }
4705 #endif // ASSERT
4706
4707 void G1ParScanThreadState::trim_queue() {
4708 assert(_evac_failure_cl != NULL, "not set");
4709
4710 StarTask ref;
4711 do {
4712 // Drain the overflow stack first, so other threads can steal.
4713 while (refs()->pop_overflow(ref)) {
4714 deal_with_reference(ref);
4715 }
4716
4717 while (refs()->pop_local(ref)) {
4718 deal_with_reference(ref);
4719 }
4720 } while (!refs()->is_empty());
4721 }
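trim_queue() above drains the shared overflow stack before the worker-local queue so that freshly exposed work stays visible to stealing threads for as long as possible. A stand-alone sketch of the same drain order with standard containers (WorkItem and process() are placeholders, not HotSpot types):

#include <deque>
#include <vector>

struct WorkItem { int payload; };

static void process(const WorkItem& /*item*/) {
  // In the real code, expanding a task may push more work onto either container.
}

static void trim(std::vector<WorkItem>& overflow, std::deque<WorkItem>& local) {
  do {
    while (!overflow.empty()) {          // overflow first, mirroring pop_overflow()
      WorkItem w = overflow.back();
      overflow.pop_back();
      process(w);
    }
    while (!local.empty()) {             // then the local queue, mirroring pop_local()
      WorkItem w = local.front();
      local.pop_front();
      process(w);
    }
  } while (!overflow.empty() || !local.empty());
}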
4722
4723 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4724 G1ParScanThreadState* par_scan_state) :
4725 _g1(g1), _par_scan_state(par_scan_state),
4726 _worker_id(par_scan_state->queue_num()) { }
4727
4728 void G1ParCopyHelper::mark_object(oop obj) { 4447 void G1ParCopyHelper::mark_object(oop obj) {
4729 #ifdef ASSERT 4448 assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
4730 HeapRegion* hr = _g1->heap_region_containing(obj);
4731 assert(hr != NULL, "sanity");
4732 assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4733 #endif // ASSERT
4734 4449
4735 // We know that the object is not moving so it's safe to read its size. 4450 // We know that the object is not moving so it's safe to read its size.
4736 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id); 4451 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4737 } 4452 }
4738 4453
4739 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) { 4454 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4740 #ifdef ASSERT
4741 assert(from_obj->is_forwarded(), "from obj should be forwarded"); 4455 assert(from_obj->is_forwarded(), "from obj should be forwarded");
4742 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee"); 4456 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4743 assert(from_obj != to_obj, "should not be self-forwarded"); 4457 assert(from_obj != to_obj, "should not be self-forwarded");
4744 4458
4745 HeapRegion* from_hr = _g1->heap_region_containing(from_obj); 4459 assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
4746 assert(from_hr != NULL, "sanity"); 4460 assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
4747 assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4748
4749 HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4750 assert(to_hr != NULL, "sanity");
4751 assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4752 #endif // ASSERT
4753 4461
4754 // The object might be in the process of being copied by another 4462 // The object might be in the process of being copied by another
4755 // worker so we cannot trust that its to-space image is 4463 // worker so we cannot trust that its to-space image is
4756 // well-formed. So we have to read its size from its from-space 4464 // well-formed. So we have to read its size from its from-space
4757 // image which we know should not be changing. 4465 // image which we know should not be changing.
4758 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id); 4466 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4759 } 4467 }
4760 4468
4761 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
4762 size_t word_sz = old->size();
4763 HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
4764 // +1 to make the -1 indexes valid...
4765 int young_index = from_region->young_index_in_cset()+1;
4766 assert( (from_region->is_young() && young_index > 0) ||
4767 (!from_region->is_young() && young_index == 0), "invariant" );
4768 G1CollectorPolicy* g1p = _g1h->g1_policy();
4769 markOop m = old->mark();
4770 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4771 : m->age();
4772 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4773 word_sz);
4774 HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
4775 #ifndef PRODUCT
4776 // Should this evacuation fail?
4777 if (_g1h->evacuation_should_fail()) {
4778 if (obj_ptr != NULL) {
4779 undo_allocation(alloc_purpose, obj_ptr, word_sz);
4780 obj_ptr = NULL;
4781 }
4782 }
4783 #endif // !PRODUCT
4784
4785 if (obj_ptr == NULL) {
4786 // This will either forward-to-self, or detect that someone else has
4787 // installed a forwarding pointer.
4788 return _g1h->handle_evacuation_failure_par(this, old);
4789 }
4790
4791 oop obj = oop(obj_ptr);
4792
4793 // We're going to allocate linearly, so might as well prefetch ahead.
4794 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4795
4796 oop forward_ptr = old->forward_to_atomic(obj);
4797 if (forward_ptr == NULL) {
4798 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4799
4800 // alloc_purpose is just a hint to allocate() above, recheck the type of region
4801 // we actually allocated from and update alloc_purpose accordingly
4802 HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
4803 alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
4804
4805 if (g1p->track_object_age(alloc_purpose)) {
4806 // We could simply do obj->incr_age(). However, this causes a
4807 // performance issue. obj->incr_age() will first check whether
4808 // the object has a displaced mark by checking its mark word;
4809 // getting the mark word from the new location of the object
4810 // stalls. So, given that we already have the mark word and we
4811 // are about to install it anyway, it's better to increase the
4812 // age on the mark word, when the object does not have a
4813 // displaced mark word. We're not expecting many objects to have
4814 // a displaced mark word, so that case is not optimized
4815 // further (it could be...) and we simply call obj->incr_age().
4816
4817 if (m->has_displaced_mark_helper()) {
4818 // in this case, we have to install the mark word first,
4819 // otherwise obj looks to be forwarded (the old mark word,
4820 // which contains the forward pointer, was copied)
4821 obj->set_mark(m);
4822 obj->incr_age();
4823 } else {
4824 m = m->incr_age();
4825 obj->set_mark(m);
4826 }
4827 age_table()->add(obj, word_sz);
4828 } else {
4829 obj->set_mark(m);
4830 }
4831
4832 if (G1StringDedup::is_enabled()) {
4833 G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
4834 to_region->is_young(),
4835 queue_num(),
4836 obj);
4837 }
4838
4839 size_t* surv_young_words = surviving_young_words();
4840 surv_young_words[young_index] += word_sz;
4841
4842 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4843 // We keep track of the next start index in the length field of
4844 // the to-space object. The actual length can be found in the
4845 // length field of the from-space object.
4846 arrayOop(obj)->set_length(0);
4847 oop* old_p = set_partial_array_mask(old);
4848 push_on_queue(old_p);
4849 } else {
4850 // No point in using the slower heap_region_containing() method,
4851 // given that we know obj is in the heap.
4852 _scanner.set_region(_g1h->heap_region_containing_raw(obj));
4853 obj->oop_iterate_backwards(&_scanner);
4854 }
4855 } else {
4856 undo_allocation(alloc_purpose, obj_ptr, word_sz);
4857 obj = forward_ptr;
4858 }
4859 return obj;
4860 }
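The forward_to_atomic() call in the (removed) copy_to_survivor_space() above is the heart of parallel evacuation: every worker that reaches a live object speculatively allocates a copy, but only the winner of a compare-and-swap on the forwarding pointer keeps it; losers undo their allocation and adopt the winner's copy. A stand-alone model of that race using std::atomic (Obj and claim_copy are illustrative names, not HotSpot types):

#include <atomic>
#include <cstddef>

struct Obj {
  std::atomic<Obj*> forwardee;           // NULL until the object has been evacuated
  Obj() : forwardee(NULL) {}
};

// Returns the canonical to-space copy of 'old_obj'. 'my_copy' is this worker's
// speculative copy; if another worker wins the race the caller must undo it.
static Obj* claim_copy(Obj* old_obj, Obj* my_copy, bool* i_won) {
  Obj* expected = NULL;
  if (old_obj->forwardee.compare_exchange_strong(expected, my_copy)) {
    *i_won = true;
    return my_copy;                      // we installed the forwarding pointer
  }
  *i_won = false;
  return expected;                       // somebody else's copy is the real one
}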
4861
4862 template <class T> 4469 template <class T>
4863 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) { 4470 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4864 if (_g1->heap_region_containing_raw(new_obj)->is_young()) { 4471 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4865 _scanned_klass->record_modified_oops(); 4472 _scanned_klass->record_modified_oops();
4866 } 4473 }
4867 } 4474 }
4868 4475
4869 template <G1Barrier barrier, bool do_mark_object> 4476 template <G1Barrier barrier, G1Mark do_mark_object>
4870 template <class T> 4477 template <class T>
4871 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) { 4478 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4872 T heap_oop = oopDesc::load_heap_oop(p); 4479 T heap_oop = oopDesc::load_heap_oop(p);
4873 4480
4874 if (oopDesc::is_null(heap_oop)) { 4481 if (oopDesc::is_null(heap_oop)) {
4877 4484
4878 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); 4485 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4879 4486
4880 assert(_worker_id == _par_scan_state->queue_num(), "sanity"); 4487 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4881 4488
4882 if (_g1->in_cset_fast_test(obj)) { 4489 G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
4490
4491 if (state == G1CollectedHeap::InCSet) {
4883 oop forwardee; 4492 oop forwardee;
4884 if (obj->is_forwarded()) { 4493 if (obj->is_forwarded()) {
4885 forwardee = obj->forwardee(); 4494 forwardee = obj->forwardee();
4886 } else { 4495 } else {
4887 forwardee = _par_scan_state->copy_to_survivor_space(obj); 4496 forwardee = _par_scan_state->copy_to_survivor_space(obj);
4888 } 4497 }
4889 assert(forwardee != NULL, "forwardee should not be NULL"); 4498 assert(forwardee != NULL, "forwardee should not be NULL");
4890 oopDesc::encode_store_heap_oop(p, forwardee); 4499 oopDesc::encode_store_heap_oop(p, forwardee);
4891 if (do_mark_object && forwardee != obj) { 4500 if (do_mark_object != G1MarkNone && forwardee != obj) {
4892 // If the object is self-forwarded we don't need to explicitly 4501 // If the object is self-forwarded we don't need to explicitly
4893 // mark it, the evacuation failure protocol will do so. 4502 // mark it, the evacuation failure protocol will do so.
4894 mark_forwarded_object(obj, forwardee); 4503 mark_forwarded_object(obj, forwardee);
4895 } 4504 }
4896 4505
4897 if (barrier == G1BarrierKlass) { 4506 if (barrier == G1BarrierKlass) {
4898 do_klass_barrier(p, forwardee); 4507 do_klass_barrier(p, forwardee);
4899 } 4508 }
4900 } else { 4509 } else {
4510 if (state == G1CollectedHeap::IsHumongous) {
4511 _g1->set_humongous_is_live(obj);
4512 }
4901 // The object is not in collection set. If we're a root scanning 4513 // The object is not in collection set. If we're a root scanning
4902 // closure during an initial mark pause (i.e. do_mark_object will 4514 // closure during an initial mark pause, then attempt to mark the object.
4903 // be true) then attempt to mark the object. 4515 if (do_mark_object == G1MarkFromRoot) {
4904 if (do_mark_object) {
4905 mark_object(obj); 4516 mark_object(obj);
4906 } 4517 }
4907 } 4518 }
4908 4519
4909 if (barrier == G1BarrierEvac) { 4520 if (barrier == G1BarrierEvac) {
4910 _par_scan_state->update_rs(_from, p, _worker_id); 4521 _par_scan_state->update_rs(_from, p, _worker_id);
4911 } 4522 }
4912 } 4523 }
4913 4524
4914 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p); 4525 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
4915 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p); 4526 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
4916 4527
4917 class G1ParEvacuateFollowersClosure : public VoidClosure { 4528 class G1ParEvacuateFollowersClosure : public VoidClosure {
4918 protected: 4529 protected:
4919 G1CollectedHeap* _g1h; 4530 G1CollectedHeap* _g1h;
4920 G1ParScanThreadState* _par_scan_state; 4531 G1ParScanThreadState* _par_scan_state;
4946 pss->end_term_time(); 4557 pss->end_term_time();
4947 return res; 4558 return res;
4948 } 4559 }
4949 4560
4950 void G1ParEvacuateFollowersClosure::do_void() { 4561 void G1ParEvacuateFollowersClosure::do_void() {
4951 StarTask stolen_task;
4952 G1ParScanThreadState* const pss = par_scan_state(); 4562 G1ParScanThreadState* const pss = par_scan_state();
4953 pss->trim_queue(); 4563 pss->trim_queue();
4954
4955 do { 4564 do {
4956 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { 4565 pss->steal_and_trim_queue(queues());
4957 assert(pss->verify_task(stolen_task), "sanity");
4958 if (stolen_task.is_narrow()) {
4959 pss->deal_with_reference((narrowOop*) stolen_task);
4960 } else {
4961 pss->deal_with_reference((oop*) stolen_task);
4962 }
4963
4964 // We've just processed a reference and we might have made
4965 // available new entries on the queues. So we have to make sure
4966 // we drain the queues as necessary.
4967 pss->trim_queue();
4968 }
4969 } while (!offer_termination()); 4566 } while (!offer_termination());
4970
4971 pss->retire_alloc_buffers();
4972 } 4567 }
4973 4568
4974 class G1KlassScanClosure : public KlassClosure { 4569 class G1KlassScanClosure : public KlassClosure {
4975 G1ParCopyHelper* _closure; 4570 G1ParCopyHelper* _closure;
4976 bool _process_only_dirty; 4571 bool _process_only_dirty;
4995 } 4590 }
4996 _count++; 4591 _count++;
4997 } 4592 }
4998 }; 4593 };
4999 4594
4595 class G1CodeBlobClosure : public CodeBlobClosure {
4596 class HeapRegionGatheringOopClosure : public OopClosure {
4597 G1CollectedHeap* _g1h;
4598 OopClosure* _work;
4599 nmethod* _nm;
4600
4601 template <typename T>
4602 void do_oop_work(T* p) {
4603 _work->do_oop(p);
4604 T oop_or_narrowoop = oopDesc::load_heap_oop(p);
4605 if (!oopDesc::is_null(oop_or_narrowoop)) {
4606 oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
4607 HeapRegion* hr = _g1h->heap_region_containing_raw(o);
4608 assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
4609 hr->add_strong_code_root(_nm);
4610 }
4611 }
4612
4613 public:
4614 HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
4615
4616 void do_oop(oop* o) {
4617 do_oop_work(o);
4618 }
4619
4620 void do_oop(narrowOop* o) {
4621 do_oop_work(o);
4622 }
4623
4624 void set_nm(nmethod* nm) {
4625 _nm = nm;
4626 }
4627 };
4628
4629 HeapRegionGatheringOopClosure _oc;
4630 public:
4631 G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
4632
4633 void do_code_blob(CodeBlob* cb) {
4634 nmethod* nm = cb->as_nmethod_or_null();
4635 if (nm != NULL) {
4636 if (!nm->test_set_oops_do_mark()) {
4637 _oc.set_nm(nm);
4638 nm->oops_do(&_oc);
4639 nm->fix_oop_relocations();
4640 }
4641 }
4642 }
4643 };
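G1CodeBlobClosure above appears to use nmethod::test_set_oops_do_mark() as a visit-once guard, so that each nmethod's oops are processed by only one worker per pause. The same idiom, reduced to a stand-alone atomic flag (Visitable and try_claim are placeholders, not HotSpot types):

#include <atomic>

struct Visitable {
  std::atomic<bool> visited;
  Visitable() : visited(false) {}
};

// exchange() returns the previous value, so only the first caller sees 'false'
// and therefore owns the visit; everyone else skips the work.
static bool try_claim(Visitable* v) {
  return v->visited.exchange(true) == false;
}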
4644
5000 class G1ParTask : public AbstractGangTask { 4645 class G1ParTask : public AbstractGangTask {
5001 protected: 4646 protected:
5002 G1CollectedHeap* _g1h; 4647 G1CollectedHeap* _g1h;
5003 RefToScanQueueSet *_queues; 4648 RefToScanQueueSet *_queues;
5004 ParallelTaskTerminator _terminator; 4649 ParallelTaskTerminator _terminator;
5005 uint _n_workers; 4650 uint _n_workers;
5006 4651
5007 Mutex _stats_lock; 4652 Mutex _stats_lock;
5008 Mutex* stats_lock() { return &_stats_lock; } 4653 Mutex* stats_lock() { return &_stats_lock; }
5009 4654
5010 size_t getNCards() {
5011 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
5012 / G1BlockOffsetSharedArray::N_bytes;
5013 }
5014
5015 public: 4655 public:
5016 G1ParTask(G1CollectedHeap* g1h, 4656 G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
5017 RefToScanQueueSet *task_queues)
5018 : AbstractGangTask("G1 collection"), 4657 : AbstractGangTask("G1 collection"),
5019 _g1h(g1h), 4658 _g1h(g1h),
5020 _queues(task_queues), 4659 _queues(task_queues),
5021 _terminator(0, _queues), 4660 _terminator(0, _queues),
5022 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true) 4661 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
5040 _g1h->set_n_termination(active_workers); 4679 _g1h->set_n_termination(active_workers);
5041 terminator()->reset_for_reuse(active_workers); 4680 terminator()->reset_for_reuse(active_workers);
5042 _n_workers = active_workers; 4681 _n_workers = active_workers;
5043 } 4682 }
5044 4683
4684 // Helps out with CLD processing.
4685 //
4686 // During InitialMark we need to:
4687 // 1) Scavenge all CLDs for the young GC.
4688 // 2) Mark all objects directly reachable from strong CLDs.
4689 template <G1Mark do_mark_object>
4690 class G1CLDClosure : public CLDClosure {
4691 G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure;
4692 G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure;
4693 G1KlassScanClosure _klass_in_cld_closure;
4694 bool _claim;
4695
4696 public:
4697 G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4698 bool only_young, bool claim)
4699 : _oop_closure(oop_closure),
4700 _oop_in_klass_closure(oop_closure->g1(),
4701 oop_closure->pss(),
4702 oop_closure->rp()),
4703 _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
4704 _claim(claim) {
4705
4706 }
4707
4708 void do_cld(ClassLoaderData* cld) {
4709 cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4710 }
4711 };
4712
5045 void work(uint worker_id) { 4713 void work(uint worker_id) {
5046 if (worker_id >= _n_workers) return; // no work needed this round 4714 if (worker_id >= _n_workers) return; // no work needed this round
5047 4715
5048 double start_time_ms = os::elapsedTime() * 1000.0; 4716 double start_time_ms = os::elapsedTime() * 1000.0;
5049 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms); 4717 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
5057 G1ParScanThreadState pss(_g1h, worker_id, rp); 4725 G1ParScanThreadState pss(_g1h, worker_id, rp);
5058 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp); 4726 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
5059 4727
5060 pss.set_evac_failure_closure(&evac_failure_cl); 4728 pss.set_evac_failure_closure(&evac_failure_cl);
5061 4729
5062 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp); 4730 bool only_young = _g1h->g1_policy()->gcs_are_young();
5063 G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp); 4731
5064 4732 // Non-IM young GC.
5065 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp); 4733 G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
5066 G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp); 4734 G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
5067 4735 only_young, // Only process dirty klasses.
5068 bool only_young = _g1h->g1_policy()->gcs_are_young(); 4736 false); // No need to claim CLDs.
5069 G1KlassScanClosure scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false); 4737 // IM young GC.
5070 G1KlassScanClosure only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young); 4738 // Strong roots closures.
5071 4739 G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
5072 OopClosure* scan_root_cl = &only_scan_root_cl; 4740 G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
5073 G1KlassScanClosure* scan_klasses_cl = &only_scan_klasses_cl_s; 4741 false, // Process all klasses.
4742 true); // Need to claim CLDs.
4743 // Weak roots closures.
4744 G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4745 G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4746 false, // Process all klasses.
4747 true); // Need to claim CLDs.
4748
4749 G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
4750 G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
4751 // IM Weak code roots are handled later.
4752
4753 OopClosure* strong_root_cl;
4754 OopClosure* weak_root_cl;
4755 CLDClosure* strong_cld_cl;
4756 CLDClosure* weak_cld_cl;
4757 CodeBlobClosure* strong_code_cl;
5074 4758
5075 if (_g1h->g1_policy()->during_initial_mark_pause()) { 4759 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5076 // We also need to mark copied objects. 4760 // We also need to mark copied objects.
5077 scan_root_cl = &scan_mark_root_cl; 4761 strong_root_cl = &scan_mark_root_cl;
5078 scan_klasses_cl = &scan_mark_klasses_cl_s; 4762 strong_cld_cl = &scan_mark_cld_cl;
4763 strong_code_cl = &scan_mark_code_cl;
4764 if (ClassUnloadingWithConcurrentMark) {
4765 weak_root_cl = &scan_mark_weak_root_cl;
4766 weak_cld_cl = &scan_mark_weak_cld_cl;
4767 } else {
4768 weak_root_cl = &scan_mark_root_cl;
4769 weak_cld_cl = &scan_mark_cld_cl;
4770 }
4771 } else {
4772 strong_root_cl = &scan_only_root_cl;
4773 weak_root_cl = &scan_only_root_cl;
4774 strong_cld_cl = &scan_only_cld_cl;
4775 weak_cld_cl = &scan_only_cld_cl;
4776 strong_code_cl = &scan_only_code_cl;
5079 } 4777 }
5080 4778
5081 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); 4779
5082 4780 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
5083 // Don't scan the scavengable methods in the code cache as part
5084 // of strong root scanning. The code roots that point into a
5085 // region in the collection set are scanned when we scan the
5086 // region's RSet.
5087 int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
5088 4781
5089 pss.start_strong_roots(); 4782 pss.start_strong_roots();
5090 _g1h->g1_process_strong_roots(/* is scavenging */ true, 4783 _g1h->g1_process_roots(strong_root_cl,
5091 SharedHeap::ScanningOption(so), 4784 weak_root_cl,
5092 scan_root_cl, 4785 &push_heap_rs_cl,
5093 &push_heap_rs_cl, 4786 strong_cld_cl,
5094 scan_klasses_cl, 4787 weak_cld_cl,
5095 worker_id); 4788 strong_code_cl,
4789 worker_id);
4790
5096 pss.end_strong_roots(); 4791 pss.end_strong_roots();
5097 4792
5098 { 4793 {
5099 double start = os::elapsedTime(); 4794 double start = os::elapsedTime();
5100 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); 4795 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
5110 if (ParallelGCVerbose) { 4805 if (ParallelGCVerbose) {
5111 MutexLocker x(stats_lock()); 4806 MutexLocker x(stats_lock());
5112 pss.print_termination_stats(worker_id); 4807 pss.print_termination_stats(worker_id);
5113 } 4808 }
5114 4809
5115 assert(pss.refs()->is_empty(), "should be empty"); 4810 assert(pss.queue_is_empty(), "should be empty");
5116 4811
5117 // Close the inner scope so that the ResourceMark and HandleMark 4812 // Close the inner scope so that the ResourceMark and HandleMark
5118 // destructors are executed here and are included as part of the 4813 // destructors are executed here and are included as part of the
5119 // "GC Worker Time". 4814 // "GC Worker Time".
5120 } 4815 }
5128 4823
5129 // This method is run in a GC worker. 4824 // This method is run in a GC worker.
5130 4825
5131 void 4826 void
5132 G1CollectedHeap:: 4827 G1CollectedHeap::
5133 g1_process_strong_roots(bool is_scavenging, 4828 g1_process_roots(OopClosure* scan_non_heap_roots,
5134 ScanningOption so, 4829 OopClosure* scan_non_heap_weak_roots,
5135 OopClosure* scan_non_heap_roots, 4830 OopsInHeapRegionClosure* scan_rs,
5136 OopsInHeapRegionClosure* scan_rs, 4831 CLDClosure* scan_strong_clds,
5137 G1KlassScanClosure* scan_klasses, 4832 CLDClosure* scan_weak_clds,
5138 uint worker_i) { 4833 CodeBlobClosure* scan_strong_code,
5139 4834 uint worker_i) {
5140 // First scan the strong roots 4835
4836 // First scan the shared roots.
5141 double ext_roots_start = os::elapsedTime(); 4837 double ext_roots_start = os::elapsedTime();
5142 double closure_app_time_sec = 0.0; 4838 double closure_app_time_sec = 0.0;
5143 4839
4840 bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4841 bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
4842
5144 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); 4843 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
5145 4844 BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
5146 assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow"); 4845
5147 // Walk the code cache/strong code roots w/o buffering, because StarTask 4846 process_roots(false, // no scoping; this is parallel code
5148 // cannot handle unaligned oop locations. 4847 SharedHeap::SO_None,
5149 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */); 4848 &buf_scan_non_heap_roots,
5150 4849 &buf_scan_non_heap_weak_roots,
5151 process_strong_roots(false, // no scoping; this is parallel code 4850 scan_strong_clds,
5152 is_scavenging, so, 4851 // Unloading Initial Marks handle the weak CLDs separately.
5153 &buf_scan_non_heap_roots, 4852 (trace_metadata ? NULL : scan_weak_clds),
5154 &eager_scan_code_roots, 4853 scan_strong_code);
5155 scan_klasses
5156 );
5157 4854
5158 // Now the CM ref_processor roots. 4855 // Now the CM ref_processor roots.
5159 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { 4856 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
5160 // We need to treat the discovered reference lists of the 4857 // We need to treat the discovered reference lists of the
5161 // concurrent mark ref processor as roots and keep entries 4858 // concurrent mark ref processor as roots and keep entries
5162 // (which are added by the marking threads) on them live 4859 // (which are added by the marking threads) on them live
5163 // until they can be processed at the end of marking. 4860 // until they can be processed at the end of marking.
5164 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots); 4861 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
5165 } 4862 }
5166 4863
4864 if (trace_metadata) {
4865 // Barrier to make sure all workers passed
4866 // the strong CLD and strong nmethods phases.
4867 active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4868
4869 // Now take the complement of the strong CLDs.
4870 ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4871 }
4872
5167 // Finish up any enqueued closure apps (attributed as object copy time). 4873 // Finish up any enqueued closure apps (attributed as object copy time).
5168 buf_scan_non_heap_roots.done(); 4874 buf_scan_non_heap_roots.done();
5169 4875 buf_scan_non_heap_weak_roots.done();
5170 double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds(); 4876
4877 double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4878 + buf_scan_non_heap_weak_roots.closure_app_seconds();
5171 4879
5172 g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); 4880 g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
5173 4881
5174 double ext_root_time_ms = 4882 double ext_root_time_ms =
5175 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0; 4883 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
5189 satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0; 4897 satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
5190 } 4898 }
5191 } 4899 }
5192 g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms); 4900 g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
5193 4901
5194 // If this is an initial mark pause, and we're not scanning
5195 // the entire code cache, we need to mark the oops in the
5196 // strong code root lists for the regions that are not in
5197 // the collection set.
5198 // Note all threads participate in this set of root tasks.
5199 double mark_strong_code_roots_ms = 0.0;
5200 if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
5201 double mark_strong_roots_start = os::elapsedTime();
5202 mark_strong_code_roots(worker_i);
5203 mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
5204 }
5205 g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
5206
5207 // Now scan the complement of the collection set. 4902 // Now scan the complement of the collection set.
5208 if (scan_rs != NULL) { 4903 G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
5209 g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i); 4904
5210 } 4905 g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4906
5211 _process_strong_tasks->all_tasks_completed(); 4907 _process_strong_tasks->all_tasks_completed();
5212 }
5213
5214 void
5215 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
5216 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
5217 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
5218 } 4908 }
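g1_process_roots() above wraps the external-root closures in BufferingOopClosure so the time spent applying the copy closure can be attributed to object copy rather than to root scanning. A stand-alone sketch of such a buffering wrapper (BufferingClosure, OopClosureLike and BufLen are illustrative names, not the HotSpot classes):

struct OopClosureLike {
  virtual void do_oop(void** p) = 0;
  virtual ~OopClosureLike() {}
};

class BufferingClosure : public OopClosureLike {
  static const int BufLen = 256;
  void**           _buf[BufLen];          // buffered oop locations
  int              _n;
  OopClosureLike*  _inner;

  void drain() {
    for (int i = 0; i < _n; i++) {
      _inner->do_oop(_buf[i]);            // the expensive part, now easy to time separately
    }
    _n = 0;
  }

 public:
  explicit BufferingClosure(OopClosureLike* inner) : _n(0), _inner(inner) {}

  virtual void do_oop(void** p) {
    if (_n == BufLen) {
      drain();
    }
    _buf[_n++] = p;                       // just remember the location for now
  }

  void done() { drain(); }                // flush what is left; call before the results are needed
};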
5219 4909
5220 class G1StringSymbolTableUnlinkTask : public AbstractGangTask { 4910 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
5221 private: 4911 private:
5222 BoolObjectClosure* _is_alive; 4912 BoolObjectClosure* _is_alive;
5232 int _symbols_removed; 4922 int _symbols_removed;
5233 4923
5234 bool _do_in_parallel; 4924 bool _do_in_parallel;
5235 public: 4925 public:
5236 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) : 4926 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
5237 AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive), 4927 AbstractGangTask("String/Symbol Unlinking"),
4928 _is_alive(is_alive),
5238 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()), 4929 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
5239 _process_strings(process_strings), _strings_processed(0), _strings_removed(0), 4930 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
5240 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) { 4931 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
5241 4932
5242 _initial_string_table_size = StringTable::the_table()->table_size(); 4933 _initial_string_table_size = StringTable::the_table()->table_size();
5254 err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT, 4945 err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
5255 StringTable::parallel_claimed_index(), _initial_string_table_size)); 4946 StringTable::parallel_claimed_index(), _initial_string_table_size));
5256 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size, 4947 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
5257 err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT, 4948 err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
5258 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size)); 4949 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4950
4951 if (G1TraceStringSymbolTableScrubbing) {
4952 gclog_or_tty->print_cr("Cleaned string and symbol table, "
4953 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
4954 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
4955 strings_processed(), strings_removed(),
4956 symbols_processed(), symbols_removed());
4957 }
5259 } 4958 }
5260 4959
5261 void work(uint worker_id) { 4960 void work(uint worker_id) {
5262 if (_do_in_parallel) { 4961 if (_do_in_parallel) {
5263 int strings_processed = 0; 4962 int strings_processed = 0;
5289 4988
5290 size_t symbols_processed() const { return (size_t)_symbols_processed; } 4989 size_t symbols_processed() const { return (size_t)_symbols_processed; }
5291 size_t symbols_removed() const { return (size_t)_symbols_removed; } 4990 size_t symbols_removed() const { return (size_t)_symbols_removed; }
5292 }; 4991 };
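The parallel string/symbol table work above leans on the tables' parallel_claimed_index() style of claiming: workers grab disjoint chunks with an atomic counter, and the guarantee()s check that the counter ran past the initial table size. A stand-alone sketch of chunked claiming with std::atomic (claim_chunk and ChunkSize are assumptions, not the StringTable API):

#include <atomic>
#include <cstddef>

static const size_t ChunkSize = 32;           // buckets handed out per claim
static std::atomic<size_t> g_claim_index(0);  // shared cursor over the table

// Each worker calls this in a loop until it returns false; the ranges handed
// out are disjoint, so no bucket is scanned twice.
static bool claim_chunk(size_t table_size, size_t* begin, size_t* end) {
  size_t start = g_claim_index.fetch_add(ChunkSize);
  if (start >= table_size) {
    return false;                             // table exhausted
  }
  *begin = start;
  *end   = (start + ChunkSize < table_size) ? (start + ChunkSize) : table_size;
  return true;
}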
5293 4992
5294 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, 4993 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
5295 bool process_strings, bool process_symbols) { 4994 private:
4995 static Monitor* _lock;
4996
4997 BoolObjectClosure* const _is_alive;
4998 const bool _unloading_occurred;
4999 const uint _num_workers;
5000
5001 // Variables used to claim nmethods.
5002 nmethod* _first_nmethod;
5003 volatile nmethod* _claimed_nmethod;
5004
5005 // The list of nmethods that need to be processed by the second pass.
5006 volatile nmethod* _postponed_list;
5007 volatile uint _num_entered_barrier;
5008
5009 public:
5010 G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
5011 _is_alive(is_alive),
5012 _unloading_occurred(unloading_occurred),
5013 _num_workers(num_workers),
5014 _first_nmethod(NULL),
5015 _claimed_nmethod(NULL),
5016 _postponed_list(NULL),
5017 _num_entered_barrier(0)
5018 {
5019 nmethod::increase_unloading_clock();
5020 _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
5021 _claimed_nmethod = (volatile nmethod*)_first_nmethod;
5022 }
5023
5024 ~G1CodeCacheUnloadingTask() {
5025 CodeCache::verify_clean_inline_caches();
5026
5027 CodeCache::set_needs_cache_clean(false);
5028 guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
5029
5030 CodeCache::verify_icholder_relocations();
5031 }
5032
5033 private:
5034 void add_to_postponed_list(nmethod* nm) {
5035 nmethod* old;
5036 do {
5037 old = (nmethod*)_postponed_list;
5038 nm->set_unloading_next(old);
5039 } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
5040 }
5041
5042 void clean_nmethod(nmethod* nm) {
5043 bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
5044
5045 if (postponed) {
5046 // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
5047 add_to_postponed_list(nm);
5048 }
5049
5050 // Mark that this nmethod has been cleaned/unloaded.
5051 // After this call, it will be safe to ask if this nmethod was unloaded or not.
5052 nm->set_unloading_clock(nmethod::global_unloading_clock());
5053 }
5054
5055 void clean_nmethod_postponed(nmethod* nm) {
5056 nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
5057 }
5058
5059 static const int MaxClaimNmethods = 16;
5060
5061 void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
5062 nmethod* first;
5063 nmethod* last;
5064
5065 do {
5066 *num_claimed_nmethods = 0;
5067
5068 first = last = (nmethod*)_claimed_nmethod;
5069
5070 if (first != NULL) {
5071 for (int i = 0; i < MaxClaimNmethods; i++) {
5072 last = CodeCache::alive_nmethod(CodeCache::next(last));
5073
5074 if (last == NULL) {
5075 break;
5076 }
5077
5078 claimed_nmethods[i] = last;
5079 (*num_claimed_nmethods)++;
5080 }
5081 }
5082
5083 } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
5084 }
5085
5086 nmethod* claim_postponed_nmethod() {
5087 nmethod* claim;
5088 nmethod* next;
5089
5090 do {
5091 claim = (nmethod*)_postponed_list;
5092 if (claim == NULL) {
5093 return NULL;
5094 }
5095
5096 next = claim->unloading_next();
5097
5098 } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
5099
5100 return claim;
5101 }
5102
5103 public:
5104 // Mark that we're done with the first pass of nmethod cleaning.
5105 void barrier_mark(uint worker_id) {
5106 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
5107 _num_entered_barrier++;
5108 if (_num_entered_barrier == _num_workers) {
5109 ml.notify_all();
5110 }
5111 }
5112
5113 // See if we have to wait for the other workers to
5114 // finish their first-pass nmethod cleaning work.
5115 void barrier_wait(uint worker_id) {
5116 if (_num_entered_barrier < _num_workers) {
5117 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
5118 while (_num_entered_barrier < _num_workers) {
5119 ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
5120 }
5121 }
5122 }
5123
5124 // Cleaning and unloading of nmethods. Some work has to be postponed
5125 // to the second pass, when we know which nmethods survive.
5126 void work_first_pass(uint worker_id) {
5127 // The first nmethod is claimed by the first worker.
5128 if (worker_id == 0 && _first_nmethod != NULL) {
5129 clean_nmethod(_first_nmethod);
5130 _first_nmethod = NULL;
5131 }
5132
5133 int num_claimed_nmethods;
5134 nmethod* claimed_nmethods[MaxClaimNmethods];
5135
5136 while (true) {
5137 claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
5138
5139 if (num_claimed_nmethods == 0) {
5140 break;
5141 }
5142
5143 for (int i = 0; i < num_claimed_nmethods; i++) {
5144 clean_nmethod(claimed_nmethods[i]);
5145 }
5146 }
5147
5148 // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
5149 // Need to retire the buffers now that this thread has stopped cleaning nmethods.
5150 MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
5151 }
5152
5153 void work_second_pass(uint worker_id) {
5154 nmethod* nm;
5155 // Take care of postponed nmethods.
5156 while ((nm = claim_postponed_nmethod()) != NULL) {
5157 clean_nmethod_postponed(nm);
5158 }
5159 }
5160 };
5161
5162 Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
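The _postponed_list handling in G1CodeCacheUnloadingTask above (add_to_postponed_list() and claim_postponed_nmethod()) is essentially a lock-free Treiber stack built on cmpxchg. A stand-alone equivalent using std::atomic; ABA is ignored here because, as in the code above, each node is pushed at most once per pass (Node, push and pop are illustrative names, not HotSpot APIs):

#include <atomic>
#include <cstddef>

struct Node { Node* next; };

static std::atomic<Node*> g_head(NULL);

static void push(Node* n) {                     // mirrors add_to_postponed_list()
  Node* old_head = g_head.load();
  do {
    n->next = old_head;
  } while (!g_head.compare_exchange_weak(old_head, n));
}

static Node* pop() {                            // mirrors claim_postponed_nmethod()
  Node* old_head = g_head.load();
  while (old_head != NULL &&
         !g_head.compare_exchange_weak(old_head, old_head->next)) {
    // compare_exchange_weak refreshed old_head on failure; retry with the new head
  }
  return old_head;                              // NULL means the list was empty
}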
5163
5164 class G1KlassCleaningTask : public StackObj {
5165 BoolObjectClosure* _is_alive;
5166 volatile jint _clean_klass_tree_claimed;
5167 ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
5168
5169 public:
5170 G1KlassCleaningTask(BoolObjectClosure* is_alive) :
5171 _is_alive(is_alive),
5172 _clean_klass_tree_claimed(0),
5173 _klass_iterator() {
5174 }
5175
5176 private:
5177 bool claim_clean_klass_tree_task() {
5178 if (_clean_klass_tree_claimed) {
5179 return false;
5180 }
5181
5182 return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
5183 }
5184
5185 InstanceKlass* claim_next_klass() {
5186 Klass* klass;
5187 do {
5188 klass =_klass_iterator.next_klass();
5189 } while (klass != NULL && !klass->oop_is_instance());
5190
5191 return (InstanceKlass*)klass;
5192 }
5193
5194 public:
5195
5196 void clean_klass(InstanceKlass* ik) {
5197 ik->clean_implementors_list(_is_alive);
5198 ik->clean_method_data(_is_alive);
5199
5200 // G1 specific cleanup work that has
5201 // been moved here to be done in parallel.
5202 ik->clean_dependent_nmethods();
5203 if (JvmtiExport::has_redefined_a_class()) {
5204 InstanceKlass::purge_previous_versions(ik);
5205 }
5206 }
5207
5208 void work() {
5209 ResourceMark rm;
5210
5211 // One worker will clean the subklass/sibling klass tree.
5212 if (claim_clean_klass_tree_task()) {
5213 Klass::clean_subklass_tree(_is_alive);
5214 }
5215
5216 // All workers will help clean the classes.
5217 InstanceKlass* klass;
5218 while ((klass = claim_next_klass()) != NULL) {
5219 clean_klass(klass);
5220 }
5221 }
5222 };
5223
5224 // To minimize the remark pause times, the tasks below are done in parallel.
5225 class G1ParallelCleaningTask : public AbstractGangTask {
5226 private:
5227 G1StringSymbolTableUnlinkTask _string_symbol_task;
5228 G1CodeCacheUnloadingTask _code_cache_task;
5229 G1KlassCleaningTask _klass_cleaning_task;
5230
5231 public:
5232 // The constructor is run in the VMThread.
5233 G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
5234 AbstractGangTask("Parallel Cleaning"),
5235 _string_symbol_task(is_alive, process_strings, process_symbols),
5236 _code_cache_task(num_workers, is_alive, unloading_occurred),
5237 _klass_cleaning_task(is_alive) {
5238 }
5239
5240 void pre_work_verification() {
5241 // The VM Thread will have registered Metadata during the single-threaded phase of MetadataOnStackMark.
5242 assert(Thread::current()->is_VM_thread()
5243 || !MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
5244 }
5245
5246 void post_work_verification() {
5247 assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
5248 }
5249
5250 // The parallel work done by all worker threads.
5251 void work(uint worker_id) {
5252 pre_work_verification();
5253
5254 // Do first pass of code cache cleaning.
5255 _code_cache_task.work_first_pass(worker_id);
5256
5257 // Let the threads mark that the first pass is done.
5258 _code_cache_task.barrier_mark(worker_id);
5259
5260 // Clean the Strings and Symbols.
5261 _string_symbol_task.work(worker_id);
5262
5263 // Wait for all workers to finish the first code cache cleaning pass.
5264 _code_cache_task.barrier_wait(worker_id);
5265
5266 // Do the second code cache cleaning work, which relies on
5267 // the liveness information gathered during the first pass.
5268 _code_cache_task.work_second_pass(worker_id);
5269
5270 // Clean all klasses that were not unloaded.
5271 _klass_cleaning_task.work();
5272
5273 post_work_verification();
5274 }
5275 };
5276
5277
5278 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5279 bool process_strings,
5280 bool process_symbols,
5281 bool class_unloading_occurred) {
5296 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? 5282 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5297 _g1h->workers()->active_workers() : 1); 5283 workers()->active_workers() : 1);
5298 5284
5299 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols); 5285 G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5286 n_workers, class_unloading_occurred);
5300 if (G1CollectedHeap::use_parallel_gc_threads()) { 5287 if (G1CollectedHeap::use_parallel_gc_threads()) {
5301 set_par_threads(n_workers); 5288 set_par_threads(n_workers);
5302 workers()->run_task(&g1_unlink_task); 5289 workers()->run_task(&g1_unlink_task);
5303 set_par_threads(0); 5290 set_par_threads(0);
5304 } else { 5291 } else {
5305 g1_unlink_task.work(0); 5292 g1_unlink_task.work(0);
5306 } 5293 }
5307 if (G1TraceStringSymbolTableScrubbing) { 5294 }
5308 gclog_or_tty->print_cr("Cleaned string and symbol table, " 5295
5309 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, " 5296 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5310 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed", 5297 bool process_strings, bool process_symbols) {
5311 g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(), 5298 {
5312 g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed()); 5299 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5300 _g1h->workers()->active_workers() : 1);
5301 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5302 if (G1CollectedHeap::use_parallel_gc_threads()) {
5303 set_par_threads(n_workers);
5304 workers()->run_task(&g1_unlink_task);
5305 set_par_threads(0);
5306 } else {
5307 g1_unlink_task.work(0);
5308 }
5313 } 5309 }
5314 5310
5315 if (G1StringDedup::is_enabled()) { 5311 if (G1StringDedup::is_enabled()) {
5316 G1StringDedup::unlink(is_alive); 5312 G1StringDedup::unlink(is_alive);
5317 } 5313 }
5318 } 5314 }
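parallel_cleaning(), unlink_string_and_symbol_table() and redirty_logged_cards() below all share the same dispatch shape: run work(worker_id) on the worker gang when parallel GC threads are in use, otherwise call it once inline. A rough stand-in using standard threads (run_task and its parameters are invented for the illustration, not part of the VM):

#include <functional>
#include <thread>
#include <vector>

void run_task(const std::function<void(unsigned)>& work,
              unsigned n_workers, bool use_parallel) {
  if (!use_parallel || n_workers <= 1) {
    work(0);                      // serial fallback, like task.work(0)
    return;
  }
  std::vector<std::thread> gang;
  for (unsigned id = 0; id < n_workers; ++id) {
    gang.emplace_back(work, id);  // like workers()->run_task(&task)
  }
  for (std::thread& t : gang) {
    t.join();                     // the initiating thread waits for the gang
  }
}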
5319 5315
5320 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { 5316 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5321 public: 5317 private:
5322 bool do_card_ptr(jbyte* card_ptr, uint worker_i) { 5318 DirtyCardQueueSet* _queue;
5323 *card_ptr = CardTableModRefBS::dirty_card_val(); 5319 public:
5324 return true; 5320 G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5321
5322 virtual void work(uint worker_id) {
5323 double start_time = os::elapsedTime();
5324
5325 RedirtyLoggedCardTableEntryClosure cl;
5326 if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
5327 _queue->par_apply_closure_to_all_completed_buffers(&cl);
5328 } else {
5329 _queue->apply_closure_to_all_completed_buffers(&cl);
5330 }
5331
5332 G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
5333 timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
5334 timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
5325 } 5335 }
5326 }; 5336 };
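What the redirty closure does per entry is simple: each queued element points at a card-table byte, and the closure writes the dirty value back so those cards are revisited when the deferred remembered-set updates are applied. A toy version, with the dirty encoding assumed purely for the example:

#include <cstddef>
#include <cstdint>
#include <vector>

static const uint8_t kDirtyCard = 0;    // assumed encoding, for illustration only

size_t redirty_cards(const std::vector<uint8_t*>& logged_cards) {
  size_t processed = 0;
  for (uint8_t* card : logged_cards) {
    *card = kDirtyCard;                  // make the card dirty again
    ++processed;                         // mirrors cl.num_processed()
  }
  return processed;
}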
5327 5337
5328 void G1CollectedHeap::redirty_logged_cards() { 5338 void G1CollectedHeap::redirty_logged_cards() {
5329 guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
5330 double redirty_logged_cards_start = os::elapsedTime(); 5339 double redirty_logged_cards_start = os::elapsedTime();
5331 5340
5332 RedirtyLoggedCardTableEntryFastClosure redirty; 5341 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5333 dirty_card_queue_set().set_closure(&redirty); 5342 _g1h->workers()->active_workers() : 1);
5334 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); 5343
5344 G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5345 dirty_card_queue_set().reset_for_par_iteration();
5346 if (use_parallel_gc_threads()) {
5347 set_par_threads(n_workers);
5348 workers()->run_task(&redirty_task);
5349 set_par_threads(0);
5350 } else {
5351 redirty_task.work(0);
5352 }
5335 5353
5336 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); 5354 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5337 dcq.merge_bufferlists(&dirty_card_queue_set()); 5355 dcq.merge_bufferlists(&dirty_card_queue_set());
5338 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); 5356 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5339 5357
5368 class G1KeepAliveClosure: public OopClosure { 5386 class G1KeepAliveClosure: public OopClosure {
5369 G1CollectedHeap* _g1; 5387 G1CollectedHeap* _g1;
5370 public: 5388 public:
5371 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} 5389 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5372 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } 5390 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5373 void do_oop( oop* p) { 5391 void do_oop(oop* p) {
5374 oop obj = *p; 5392 oop obj = *p;
5375 5393 assert(obj != NULL, "the caller should have filtered out NULL values");
5376 if (_g1->obj_in_cs(obj)) { 5394
5395 G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
5396 if (cset_state == G1CollectedHeap::InNeither) {
5397 return;
5398 }
5399 if (cset_state == G1CollectedHeap::InCSet) {
5377 assert( obj->is_forwarded(), "invariant" ); 5400 assert( obj->is_forwarded(), "invariant" );
5378 *p = obj->forwardee(); 5401 *p = obj->forwardee();
5402 } else {
5403 assert(!obj->is_forwarded(), "invariant" );
5404 assert(cset_state == G1CollectedHeap::IsHumongous,
5405 err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
5406 _g1->set_humongous_is_live(obj);
5379 } 5407 }
5380 } 5408 }
5381 }; 5409 };
5382 5410
5383 // Copying Keep Alive closure - can be called from both 5411 // Copying Keep Alive closure - can be called from both
5386 // and different queues. 5414 // and different queues.
5387 5415
5388 class G1CopyingKeepAliveClosure: public OopClosure { 5416 class G1CopyingKeepAliveClosure: public OopClosure {
5389 G1CollectedHeap* _g1h; 5417 G1CollectedHeap* _g1h;
5390 OopClosure* _copy_non_heap_obj_cl; 5418 OopClosure* _copy_non_heap_obj_cl;
5391 OopsInHeapRegionClosure* _copy_metadata_obj_cl;
5392 G1ParScanThreadState* _par_scan_state; 5419 G1ParScanThreadState* _par_scan_state;
5393 5420
5394 public: 5421 public:
5395 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h, 5422 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5396 OopClosure* non_heap_obj_cl, 5423 OopClosure* non_heap_obj_cl,
5397 OopsInHeapRegionClosure* metadata_obj_cl,
5398 G1ParScanThreadState* pss): 5424 G1ParScanThreadState* pss):
5399 _g1h(g1h), 5425 _g1h(g1h),
5400 _copy_non_heap_obj_cl(non_heap_obj_cl), 5426 _copy_non_heap_obj_cl(non_heap_obj_cl),
5401 _copy_metadata_obj_cl(metadata_obj_cl),
5402 _par_scan_state(pss) 5427 _par_scan_state(pss)
5403 {} 5428 {}
5404 5429
5405 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 5430 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
5406 virtual void do_oop( oop* p) { do_oop_work(p); } 5431 virtual void do_oop( oop* p) { do_oop_work(p); }
5407 5432
5408 template <class T> void do_oop_work(T* p) { 5433 template <class T> void do_oop_work(T* p) {
5409 oop obj = oopDesc::load_decode_heap_oop(p); 5434 oop obj = oopDesc::load_decode_heap_oop(p);
5410 5435
5411 if (_g1h->obj_in_cs(obj)) { 5436 if (_g1h->is_in_cset_or_humongous(obj)) {
5412 // If the referent object has been forwarded (either copied 5437 // If the referent object has been forwarded (either copied
5413 // to a new location or to itself in the event of an 5438 // to a new location or to itself in the event of an
5414 // evacuation failure) then we need to update the reference 5439 // evacuation failure) then we need to update the reference
5415 // field and, if both reference and referent are in the G1 5440 // field and, if both reference and referent are in the G1
5416 // heap, update the RSet for the referent. 5441 // heap, update the RSet for the referent.
5429 5454
5430 if (_g1h->is_in_g1_reserved(p)) { 5455 if (_g1h->is_in_g1_reserved(p)) {
5431 _par_scan_state->push_on_queue(p); 5456 _par_scan_state->push_on_queue(p);
5432 } else { 5457 } else {
5433 assert(!Metaspace::contains((const void*)p), 5458 assert(!Metaspace::contains((const void*)p),
5434 err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) " 5459 err_msg("Unexpectedly found a pointer from metadata: "
5435 PTR_FORMAT, p)); 5460 PTR_FORMAT, p));
5436 _copy_non_heap_obj_cl->do_oop(p); 5461 _copy_non_heap_obj_cl->do_oop(p);
5437 }
5438 } 5462 }
5439 } 5463 }
5464 }
5440 }; 5465 };
5441 5466
5442 // Serial drain queue closure. Called as the 'complete_gc' 5467 // Serial drain queue closure. Called as the 'complete_gc'
5443 // closure for each discovered list in some of the 5468 // closure for each discovered list in some of the
5444 // reference processing phases. 5469 // reference processing phases.
5524 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL); 5549 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5525 5550
5526 pss.set_evac_failure_closure(&evac_failure_cl); 5551 pss.set_evac_failure_closure(&evac_failure_cl);
5527 5552
5528 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); 5553 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5529 G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
5530 5554
5531 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); 5555 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5532 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5533 5556
5534 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; 5557 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5535 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5536 5558
5537 if (_g1h->g1_policy()->during_initial_mark_pause()) { 5559 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5538 // We also need to mark copied objects. 5560 // We also need to mark copied objects.
5539 copy_non_heap_cl = &copy_mark_non_heap_cl; 5561 copy_non_heap_cl = &copy_mark_non_heap_cl;
5540 copy_metadata_cl = &copy_mark_metadata_cl;
5541 } 5562 }
5542 5563
5543 // Keep alive closure. 5564 // Keep alive closure.
5544 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss); 5565 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5545 5566
5546 // Complete GC closure 5567 // Complete GC closure
5547 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator); 5568 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5548 5569
5549 // Call the reference processing task's work routine. 5570 // Call the reference processing task's work routine.
5630 G1ParScanThreadState pss(_g1h, worker_id, NULL); 5651 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5631 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL); 5652 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5632 5653
5633 pss.set_evac_failure_closure(&evac_failure_cl); 5654 pss.set_evac_failure_closure(&evac_failure_cl);
5634 5655
5635 assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); 5656 assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5636
5637 5657
5638 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); 5658 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5639 G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
5640 5659
5641 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); 5660 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5642 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5643 5661
5644 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; 5662 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5645 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5646 5663
5647 if (_g1h->g1_policy()->during_initial_mark_pause()) { 5664 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5648 // We also need to mark copied objects. 5665 // We also need to mark copied objects.
5649 copy_non_heap_cl = &copy_mark_non_heap_cl; 5666 copy_non_heap_cl = &copy_mark_non_heap_cl;
5650 copy_metadata_cl = &copy_mark_metadata_cl;
5651 } 5667 }
5652 5668
5653 // Is alive closure 5669 // Is alive closure
5654 G1AlwaysAliveClosure always_alive(_g1h); 5670 G1AlwaysAliveClosure always_alive(_g1h);
5655 5671
5656 // Copying keep alive closure. Applied to referent objects that need 5672 // Copying keep alive closure. Applied to referent objects that need
5657 // to be copied. 5673 // to be copied.
5658 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss); 5674 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5659 5675
5660 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 5676 ReferenceProcessor* rp = _g1h->ref_processor_cm();
5661 5677
5662 uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q(); 5678 uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5663 uint stride = MIN2(MAX2(_n_workers, 1U), limit); 5679 uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5689 5705
5690 // Drain the queue - which may cause stealing 5706 // Drain the queue - which may cause stealing
5691 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator); 5707 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5692 drain_queue.do_void(); 5708 drain_queue.do_void();
5693 // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure 5709 // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5694 assert(pss.refs()->is_empty(), "should be"); 5710 assert(pss.queue_is_empty(), "should be");
5695 } 5711 }
5696 }; 5712 };
5697 5713
5698 // Weak Reference processing during an evacuation pause (part 1). 5714 // Weak Reference processing during an evacuation pause (part 1).
5699 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) { 5715 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
5756 // reference objects. 5772 // reference objects.
5757 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL); 5773 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5758 5774
5759 pss.set_evac_failure_closure(&evac_failure_cl); 5775 pss.set_evac_failure_closure(&evac_failure_cl);
5760 5776
5761 assert(pss.refs()->is_empty(), "pre-condition"); 5777 assert(pss.queue_is_empty(), "pre-condition");
5762 5778
5763 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL); 5779 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
5764 G1ParScanMetadataClosure only_copy_metadata_cl(this, &pss, NULL);
5765 5780
5766 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL); 5781 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5767 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5768 5782
5769 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; 5783 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5770 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5771 5784
5772 if (_g1h->g1_policy()->during_initial_mark_pause()) { 5785 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5773 // We also need to mark copied objects. 5786 // We also need to mark copied objects.
5774 copy_non_heap_cl = &copy_mark_non_heap_cl; 5787 copy_non_heap_cl = &copy_mark_non_heap_cl;
5775 copy_metadata_cl = &copy_mark_metadata_cl;
5776 } 5788 }
5777 5789
5778 // Keep alive closure. 5790 // Keep alive closure.
5779 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss); 5791 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);
5780 5792
5781 // Serial Complete GC closure 5793 // Serial Complete GC closure
5782 G1STWDrainQueueClosure drain_queue(this, &pss); 5794 G1STWDrainQueueClosure drain_queue(this, &pss);
5783 5795
5784 // Setup the soft refs policy... 5796 // Setup the soft refs policy...
5789 // Serial reference processing... 5801 // Serial reference processing...
5790 stats = rp->process_discovered_references(&is_alive, 5802 stats = rp->process_discovered_references(&is_alive,
5791 &keep_alive, 5803 &keep_alive,
5792 &drain_queue, 5804 &drain_queue,
5793 NULL, 5805 NULL,
5794 _gc_timer_stw); 5806 _gc_timer_stw,
5807 _gc_tracer_stw->gc_id());
5795 } else { 5808 } else {
5796 // Parallel reference processing 5809 // Parallel reference processing
5797 assert(rp->num_q() == no_of_gc_workers, "sanity"); 5810 assert(rp->num_q() == no_of_gc_workers, "sanity");
5798 assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); 5811 assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5799 5812
5800 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers); 5813 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5801 stats = rp->process_discovered_references(&is_alive, 5814 stats = rp->process_discovered_references(&is_alive,
5802 &keep_alive, 5815 &keep_alive,
5803 &drain_queue, 5816 &drain_queue,
5804 &par_task_executor, 5817 &par_task_executor,
5805 _gc_timer_stw); 5818 _gc_timer_stw,
5819 _gc_tracer_stw->gc_id());
5806 } 5820 }
5807 5821
5808 _gc_tracer_stw->report_gc_reference_stats(stats); 5822 _gc_tracer_stw->report_gc_reference_stats(stats);
5809 // We have completed copying any necessary live referent objects 5823
5810 // (that were not copied during the actual pause) so we can 5824 // We have completed copying any necessary live referent objects.
5811 // retire any active alloc buffers 5825 assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5812 pss.retire_alloc_buffers();
5813 assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5814 5826
5815 double ref_proc_time = os::elapsedTime() - ref_proc_start; 5827 double ref_proc_time = os::elapsedTime() - ref_proc_start;
5816 g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0); 5828 g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5817 } 5829 }
5818 5830
5893 double start_par_time_sec = os::elapsedTime(); 5905 double start_par_time_sec = os::elapsedTime();
5894 double end_par_time_sec; 5906 double end_par_time_sec;
5895 5907
5896 { 5908 {
5897 StrongRootsScope srs(this); 5909 StrongRootsScope srs(this);
5910 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5911 if (g1_policy()->during_initial_mark_pause()) {
5912 ClassLoaderDataGraph::clear_claimed_marks();
5913 }
5898 5914
5899 if (G1CollectedHeap::use_parallel_gc_threads()) { 5915 if (G1CollectedHeap::use_parallel_gc_threads()) {
5900 // The individual threads will set their evac-failure closures. 5916 // The individual threads will set their evac-failure closures.
5901 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); 5917 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5902 // These tasks use ShareHeap::_process_strong_tasks 5918 // These tasks use ShareHeap::_process_strong_tasks
5941 if (G1StringDedup::is_enabled()) { 5957 if (G1StringDedup::is_enabled()) {
5942 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive); 5958 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
5943 } 5959 }
5944 } 5960 }
5945 5961
5946 release_gc_alloc_regions(n_workers, evacuation_info); 5962 _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
5947 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); 5963 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5948 5964
5949 // Reset and re-enable the hot card cache. 5965 // Reset and re-enable the hot card cache.
5950 // Note the counts for the cards in the regions in the 5966 // Note the counts for the cards in the regions in the
5951 // collection set are reset when the collection set is freed. 5967 // collection set are reset when the collection set is freed.
5952 hot_card_cache->reset_hot_cache(); 5968 hot_card_cache->reset_hot_cache();
5953 hot_card_cache->set_use_cache(true); 5969 hot_card_cache->set_use_cache(true);
5954
5955 // Migrate the strong code roots attached to each region in
5956 // the collection set. Ideally we would like to do this
5957 // after we have finished the scanning/evacuation of the
5958 // strong code roots for a particular heap region.
5959 migrate_strong_code_roots();
5960 5970
5961 purge_code_root_memory(); 5971 purge_code_root_memory();
5962 5972
5963 if (g1_policy()->during_initial_mark_pause()) { 5973 if (g1_policy()->during_initial_mark_pause()) {
5964 // Reset the claim values set during marking the strong code roots 5974 // Reset the claim values set during marking the strong code roots
5983 // will log these updates (and dirty their associated 5993 // will log these updates (and dirty their associated
5984 // cards). We need these updates logged to update any 5994 // cards). We need these updates logged to update any
5985 // RSets. 5995 // RSets.
5986 enqueue_discovered_references(n_workers); 5996 enqueue_discovered_references(n_workers);
5987 5997
5988 if (G1DeferredRSUpdate) { 5998 redirty_logged_cards();
5989 redirty_logged_cards();
5990 }
5991 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 5999 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5992 } 6000 }
5993 6001
5994 void G1CollectedHeap::free_region(HeapRegion* hr, 6002 void G1CollectedHeap::free_region(HeapRegion* hr,
5995 FreeRegionList* free_list, 6003 FreeRegionList* free_list,
5996 bool par, 6004 bool par,
5997 bool locked) { 6005 bool locked) {
5998 assert(!hr->isHumongous(), "this is only for non-humongous regions"); 6006 assert(!hr->is_free(), "the region should not be free");
5999 assert(!hr->is_empty(), "the region should not be empty"); 6007 assert(!hr->is_empty(), "the region should not be empty");
6008 assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
6000 assert(free_list != NULL, "pre-condition"); 6009 assert(free_list != NULL, "pre-condition");
6010
6011 if (G1VerifyBitmaps) {
6012 MemRegion mr(hr->bottom(), hr->end());
6013 concurrent_mark()->clearRangePrevBitmap(mr);
6014 }
6001 6015
6002 // Clear the card counts for this region. 6016 // Clear the card counts for this region.
6003 // Note: we only need to do this if the region is not young 6017 // Note: we only need to do this if the region is not young
6004 // (since we don't refine cards in young regions). 6018 // (since we don't refine cards in young regions).
6005 if (!hr->is_young()) { 6019 if (!hr->is_young()) {
6017 6031
6018 size_t hr_capacity = hr->capacity(); 6032 size_t hr_capacity = hr->capacity();
6019 // We need to read this before we make the region non-humongous, 6033 // We need to read this before we make the region non-humongous,
6020 // otherwise the information will be gone. 6034 // otherwise the information will be gone.
6021 uint last_index = hr->last_hc_index(); 6035 uint last_index = hr->last_hc_index();
6022 hr->set_notHumongous(); 6036 hr->clear_humongous();
6023 free_region(hr, free_list, par); 6037 free_region(hr, free_list, par);
6024 6038
6025 uint i = hr->hrs_index() + 1; 6039 uint i = hr->hrm_index() + 1;
6026 while (i < last_index) { 6040 while (i < last_index) {
6027 HeapRegion* curr_hr = region_at(i); 6041 HeapRegion* curr_hr = region_at(i);
6028 assert(curr_hr->continuesHumongous(), "invariant"); 6042 assert(curr_hr->continuesHumongous(), "invariant");
6029 curr_hr->set_notHumongous(); 6043 curr_hr->clear_humongous();
6030 free_region(curr_hr, free_list, par); 6044 free_region(curr_hr, free_list, par);
6031 i += 1; 6045 i += 1;
6032 } 6046 }
6033 } 6047 }
6034 6048
6044 6058
6045 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) { 6059 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
6046 assert(list != NULL, "list can't be null"); 6060 assert(list != NULL, "list can't be null");
6047 if (!list->is_empty()) { 6061 if (!list->is_empty()) {
6048 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); 6062 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
6049 _free_list.add_ordered(list); 6063 _hrm.insert_list_into_free_list(list);
6050 } 6064 }
6051 } 6065 }
6052 6066
6053 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { 6067 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
6054 assert(_summary_bytes_used >= bytes, 6068 _allocator->decrease_used(bytes);
6055 err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
6056 _summary_bytes_used, bytes));
6057 _summary_bytes_used -= bytes;
6058 } 6069 }
6059 6070
6060 class G1ParCleanupCTTask : public AbstractGangTask { 6071 class G1ParCleanupCTTask : public AbstractGangTask {
6061 G1SATBCardTableModRefBS* _ct_bs; 6072 G1SATBCardTableModRefBS* _ct_bs;
6062 G1CollectedHeap* _g1h; 6073 G1CollectedHeap* _g1h;
6131 } 6142 }
6132 6143
6133 void G1CollectedHeap::verify_dirty_young_regions() { 6144 void G1CollectedHeap::verify_dirty_young_regions() {
6134 verify_dirty_young_list(_young_list->first_region()); 6145 verify_dirty_young_list(_young_list->first_region());
6135 } 6146 }
6136 #endif 6147
6148 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
6149 HeapWord* tams, HeapWord* end) {
6150 guarantee(tams <= end,
6151 err_msg("tams: "PTR_FORMAT" end: "PTR_FORMAT, tams, end));
6152 HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
6153 if (result < end) {
6154 gclog_or_tty->cr();
6155 gclog_or_tty->print_cr("## wrong marked address on %s bitmap: "PTR_FORMAT,
6156 bitmap_name, result);
6157 gclog_or_tty->print_cr("## %s tams: "PTR_FORMAT" end: "PTR_FORMAT,
6158 bitmap_name, tams, end);
6159 return false;
6160 }
6161 return true;
6162 }
6163
6164 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
6165 CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
6166 CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
6167
6168 HeapWord* bottom = hr->bottom();
6169 HeapWord* ptams = hr->prev_top_at_mark_start();
6170 HeapWord* ntams = hr->next_top_at_mark_start();
6171 HeapWord* end = hr->end();
6172
6173 bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
6174
6175 bool res_n = true;
6176 // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
6177 // we do the clearing of the next bitmap concurrently. Thus, we cannot verify the bitmap
6178 // if we happen to be in that state.
6179 if (mark_in_progress() || !_cmThread->in_progress()) {
6180 res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
6181 }
6182 if (!res_p || !res_n) {
6183 gclog_or_tty->print_cr("#### Bitmap verification failed for "HR_FORMAT,
6184 HR_FORMAT_PARAMS(hr));
6185 gclog_or_tty->print_cr("#### Caller: %s", caller);
6186 return false;
6187 }
6188 return true;
6189 }
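The invariant behind verify_no_bits_over_tams() is simply that no mark bit may be set at or above the region's top-at-mark-start. Reduced to a toy bitmap, with std::vector<bool> standing in for CMBitMapRO:

#include <cstddef>
#include <vector>

bool no_bits_over_tams(const std::vector<bool>& bitmap,
                       size_t tams, size_t end) {
  for (size_t i = tams; i < end; ++i) {
    if (bitmap[i]) {
      return false;   // a marked word above TAMS breaks the invariant
    }
  }
  return true;
}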
6190
6191 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
6192 if (!G1VerifyBitmaps) return;
6193
6194 guarantee(verify_bitmaps(caller, hr), "bitmap verification");
6195 }
6196
6197 class G1VerifyBitmapClosure : public HeapRegionClosure {
6198 private:
6199 const char* _caller;
6200 G1CollectedHeap* _g1h;
6201 bool _failures;
6202
6203 public:
6204 G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
6205 _caller(caller), _g1h(g1h), _failures(false) { }
6206
6207 bool failures() { return _failures; }
6208
6209 virtual bool doHeapRegion(HeapRegion* hr) {
6210 if (hr->continuesHumongous()) return false;
6211
6212 bool result = _g1h->verify_bitmaps(_caller, hr);
6213 if (!result) {
6214 _failures = true;
6215 }
6216 return false;
6217 }
6218 };
6219
6220 void G1CollectedHeap::check_bitmaps(const char* caller) {
6221 if (!G1VerifyBitmaps) return;
6222
6223 G1VerifyBitmapClosure cl(caller, this);
6224 heap_region_iterate(&cl);
6225 guarantee(!cl.failures(), "bitmap verification");
6226 }
6227 #endif // PRODUCT
6137 6228
6138 void G1CollectedHeap::cleanUpCardTable() { 6229 void G1CollectedHeap::cleanUpCardTable() {
6139 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); 6230 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6140 double start = os::elapsedTime(); 6231 double start = os::elapsedTime();
6141 6232
6252 } else { 6343 } else {
6253 cur->uninstall_surv_rate_group(); 6344 cur->uninstall_surv_rate_group();
6254 if (cur->is_young()) { 6345 if (cur->is_young()) {
6255 cur->set_young_index_in_cset(-1); 6346 cur->set_young_index_in_cset(-1);
6256 } 6347 }
6257 cur->set_not_young();
6258 cur->set_evacuation_failed(false); 6348 cur->set_evacuation_failed(false);
6259 // The region is now considered to be old. 6349 // The region is now considered to be old.
6350 cur->set_old();
6260 _old_set.add(cur); 6351 _old_set.add(cur);
6261 evacuation_info.increment_collectionset_used_after(cur->used()); 6352 evacuation_info.increment_collectionset_used_after(cur->used());
6262 } 6353 }
6263 cur = next; 6354 cur = next;
6264 } 6355 }
6278 6369
6279 prepend_to_freelist(&local_free_list); 6370 prepend_to_freelist(&local_free_list);
6280 decrement_summary_bytes(pre_used); 6371 decrement_summary_bytes(pre_used);
6281 policy->phase_times()->record_young_free_cset_time_ms(young_time_ms); 6372 policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
6282 policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms); 6373 policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
6374 }
6375
6376 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
6377 private:
6378 FreeRegionList* _free_region_list;
6379 HeapRegionSet* _proxy_set;
6380 HeapRegionSetCount _humongous_regions_removed;
6381 size_t _freed_bytes;
6382 public:
6383
6384 G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
6385 _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
6386 }
6387
6388 virtual bool doHeapRegion(HeapRegion* r) {
6389 if (!r->startsHumongous()) {
6390 return false;
6391 }
6392
6393 G1CollectedHeap* g1h = G1CollectedHeap::heap();
6394
6395 oop obj = (oop)r->bottom();
6396 CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
6397
6398 // The following checks of whether the humongous object is live are sufficient.
6399 // The main additional check (in addition to having a reference from the roots
6400 // or the young gen) is whether the humongous object has a remembered set entry.
6401 //
6402 // A humongous object cannot be live if there is no remembered set for it
6403 // because:
6404 // - there can be no references from within humongous starts regions referencing
6405 // the object because we never allocate other objects into them.
6406 // (I.e. there are no intra-region references that may be missed by the
6407 // remembered set)
6408 // - as soon as there is a remembered set entry to the humongous starts region
6409 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
6410 // until the end of a concurrent mark.
6411 //
6412 // It is not required to check whether the object has been found dead by marking
6413 // or not, in fact it would prevent reclamation within a concurrent cycle, as
6414 // all objects allocated during that time are considered live.
6415 // SATB marking is even more conservative than the remembered set.
6416 // So if at this point in the collection there is no remembered set entry,
6417 // nobody has a reference to it.
6418 // At the start of collection we flush all refinement logs, and remembered sets
6419 // are completely up-to-date wrt references to the humongous object.
6420 //
6421 // Other implementation considerations:
6422 // - never consider object arrays: while they are a valid target, they have not
6423 // been observed to be used as temporary objects.
6424 // - they would also require considerable effort for cleaning up the remembered
6425 // sets.
6426 // While this cleanup is not strictly necessary to be done (or done instantly),
6427 // given that their occurrence is very low, this saves us the additional
6428 // complexity.
6429 uint region_idx = r->hrm_index();
6430 if (g1h->humongous_is_live(region_idx) ||
6431 g1h->humongous_region_is_always_live(region_idx)) {
6432
6433 if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
6434 gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6435 r->isHumongous(),
6436 region_idx,
6437 r->rem_set()->occupied(),
6438 r->rem_set()->strong_code_roots_list_length(),
6439 next_bitmap->isMarked(r->bottom()),
6440 g1h->humongous_is_live(region_idx),
6441 obj->is_objArray()
6442 );
6443 }
6444
6445 return false;
6446 }
6447
6448 guarantee(!obj->is_objArray(),
6449 err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
6450 r->bottom()));
6451
6452 if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
6453 gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6454 r->isHumongous(),
6455 r->bottom(),
6456 region_idx,
6457 r->region_num(),
6458 r->rem_set()->occupied(),
6459 r->rem_set()->strong_code_roots_list_length(),
6460 next_bitmap->isMarked(r->bottom()),
6461 g1h->humongous_is_live(region_idx),
6462 obj->is_objArray()
6463 );
6464 }
6465 // Need to clear mark bit of the humongous object if already set.
6466 if (next_bitmap->isMarked(r->bottom())) {
6467 next_bitmap->clear(r->bottom());
6468 }
6469 _freed_bytes += r->used();
6470 r->set_containing_set(NULL);
6471 _humongous_regions_removed.increment(1u, r->capacity());
6472 g1h->free_humongous_region(r, _free_region_list, false);
6473
6474 return false;
6475 }
6476
6477 HeapRegionSetCount& humongous_free_count() {
6478 return _humongous_regions_removed;
6479 }
6480
6481 size_t bytes_freed() const {
6482 return _freed_bytes;
6483 }
6484
6485 size_t humongous_reclaimed() const {
6486 return _humongous_regions_removed.length();
6487 }
6488 };
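Boiled down, the closure above treats a humongous candidate as reclaimable only when nothing remembers it, nothing found it live via the roots or the young gen during this pause, and it is not an object array. A condensed restatement with invented field names, not the VM's actual data layout:

struct HumongousCandidate {
  bool has_remset_entries;     // any remembered-set entry => may still be referenced
  bool found_live_this_pause;  // e.g. flagged via set_humongous_is_live()
  bool is_obj_array;           // object arrays are excluded by policy
};

inline bool eagerly_reclaimable(const HumongousCandidate& c) {
  return !c.has_remset_entries && !c.found_live_this_pause && !c.is_obj_array;
}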
6489
6490 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
6491 assert_at_safepoint(true);
6492
6493 if (!G1ReclaimDeadHumongousObjectsAtYoungGC || !_has_humongous_reclaim_candidates) {
6494 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
6495 return;
6496 }
6497
6498 double start_time = os::elapsedTime();
6499
6500 FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
6501
6502 G1FreeHumongousRegionClosure cl(&local_cleanup_list);
6503 heap_region_iterate(&cl);
6504
6505 HeapRegionSetCount empty_set;
6506 remove_from_old_sets(empty_set, cl.humongous_free_count());
6507
6508 G1HRPrinter* hr_printer = _g1h->hr_printer();
6509 if (hr_printer->is_active()) {
6510 FreeRegionListIterator iter(&local_cleanup_list);
6511 while (iter.more_available()) {
6512 HeapRegion* hr = iter.get_next();
6513 hr_printer->cleanup(hr);
6514 }
6515 }
6516
6517 prepend_to_freelist(&local_cleanup_list);
6518 decrement_summary_bytes(cl.bytes_freed());
6519
6520 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
6521 cl.humongous_reclaimed());
6283 } 6522 }
6284 6523
6285 // This routine is similar to the above but does not record 6524 // This routine is similar to the above but does not record
6286 // any policy statistics or update free lists; we are abandoning 6525 // any policy statistics or update free lists; we are abandoning
6287 // the current incremental collection set in preparation of a 6526 // the current incremental collection set in preparation of a
6393 6632
6394 public: 6633 public:
6395 TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { } 6634 TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
6396 6635
6397 bool doHeapRegion(HeapRegion* r) { 6636 bool doHeapRegion(HeapRegion* r) {
6398 if (r->is_empty()) { 6637 if (r->is_old()) {
6399 // We ignore empty regions, we'll empty the free list afterwards 6638 _old_set->remove(r);
6400 } else if (r->is_young()) { 6639 } else {
6401 // We ignore young regions, we'll empty the young list afterwards 6640 // We ignore free regions, we'll empty the free list afterwards.
6402 } else if (r->isHumongous()) { 6641 // We ignore young regions, we'll empty the young list afterwards.
6403 // We ignore humongous regions, we're not tearing down the 6642 // We ignore humongous regions, we're not tearing down the
6404 // humongous region set 6643 // humongous regions set.
6405 } else { 6644 assert(r->is_free() || r->is_young() || r->isHumongous(),
6406 // The rest should be old 6645 "it cannot be another type");
6407 _old_set->remove(r);
6408 } 6646 }
6409 return false; 6647 return false;
6410 } 6648 }
6411 6649
6412 ~TearDownRegionSetsClosure() { 6650 ~TearDownRegionSetsClosure() {
6424 // Note that emptying the _young_list is postponed and instead done as 6662 // Note that emptying the _young_list is postponed and instead done as
6425 // the first step when rebuilding the region sets again. The reason for 6663 // the first step when rebuilding the region sets again. The reason for
6426 // this is that during a full GC string deduplication needs to know if 6664 // this is that during a full GC string deduplication needs to know if
6427 // a collected region was young or old when the full GC was initiated. 6665 // a collected region was young or old when the full GC was initiated.
6428 } 6666 }
6429 _free_list.remove_all(); 6667 _hrm.remove_all_free_regions();
6430 } 6668 }
6431 6669
6432 class RebuildRegionSetsClosure : public HeapRegionClosure { 6670 class RebuildRegionSetsClosure : public HeapRegionClosure {
6433 private: 6671 private:
6434 bool _free_list_only; 6672 bool _free_list_only;
6435 HeapRegionSet* _old_set; 6673 HeapRegionSet* _old_set;
6436 FreeRegionList* _free_list; 6674 HeapRegionManager* _hrm;
6437 size_t _total_used; 6675 size_t _total_used;
6438 6676
6439 public: 6677 public:
6440 RebuildRegionSetsClosure(bool free_list_only, 6678 RebuildRegionSetsClosure(bool free_list_only,
6441 HeapRegionSet* old_set, FreeRegionList* free_list) : 6679 HeapRegionSet* old_set, HeapRegionManager* hrm) :
6442 _free_list_only(free_list_only), 6680 _free_list_only(free_list_only),
6443 _old_set(old_set), _free_list(free_list), _total_used(0) { 6681 _old_set(old_set), _hrm(hrm), _total_used(0) {
6444 assert(_free_list->is_empty(), "pre-condition"); 6682 assert(_hrm->num_free_regions() == 0, "pre-condition");
6445 if (!free_list_only) { 6683 if (!free_list_only) {
6446 assert(_old_set->is_empty(), "pre-condition"); 6684 assert(_old_set->is_empty(), "pre-condition");
6447 } 6685 }
6448 } 6686 }
6449 6687
6452 return false; 6690 return false;
6453 } 6691 }
6454 6692
6455 if (r->is_empty()) { 6693 if (r->is_empty()) {
6456 // Add free regions to the free list 6694 // Add free regions to the free list
6457 _free_list->add_as_tail(r); 6695 r->set_free();
6696 r->set_allocation_context(AllocationContext::system());
6697 _hrm->insert_into_free_list(r);
6458 } else if (!_free_list_only) { 6698 } else if (!_free_list_only) {
6459 assert(!r->is_young(), "we should not come across young regions"); 6699 assert(!r->is_young(), "we should not come across young regions");
6460 6700
6461 if (r->isHumongous()) { 6701 if (r->isHumongous()) {
6462 // We ignore humongous regions, we left the humongous set unchanged 6702 // We ignore humongous regions, we left the humongous set unchanged
6463 } else { 6703 } else {
6464 // The rest should be old, add them to the old set 6704 // Objects that were compacted would have ended up on regions
6705 // that were previously old or free.
6706 assert(r->is_free() || r->is_old(), "invariant");
6707 // We now consider them old, so register as such.
6708 r->set_old();
6465 _old_set->add(r); 6709 _old_set->add(r);
6466 } 6710 }
6467 _total_used += r->used(); 6711 _total_used += r->used();
6468 } 6712 }
6469 6713
6480 6724
6481 if (!free_list_only) { 6725 if (!free_list_only) {
6482 _young_list->empty_list(); 6726 _young_list->empty_list();
6483 } 6727 }
6484 6728
6485 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list); 6729 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6486 heap_region_iterate(&cl); 6730 heap_region_iterate(&cl);
6487 6731
6488 if (!free_list_only) { 6732 if (!free_list_only) {
6489 _summary_bytes_used = cl.total_used(); 6733 _allocator->set_used(cl.total_used());
6490 } 6734 }
6491 assert(_summary_bytes_used == recalculate_used(), 6735 assert(_allocator->used_unlocked() == recalculate_used(),
6492 err_msg("inconsistent _summary_bytes_used, " 6736 err_msg("inconsistent _allocator->used_unlocked(), "
6493 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT, 6737 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6494 _summary_bytes_used, recalculate_used())); 6738 _allocator->used_unlocked(), recalculate_used()));
6495 } 6739 }
6496 6740
6497 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { 6741 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6498 _refine_cte_cl->set_concurrent(concurrent); 6742 _refine_cte_cl->set_concurrent(concurrent);
6499 } 6743 }
6500 6744
6501 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { 6745 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6502 HeapRegion* hr = heap_region_containing(p); 6746 HeapRegion* hr = heap_region_containing(p);
6503 if (hr == NULL) { 6747 return hr->is_in(p);
6504 return false;
6505 } else {
6506 return hr->is_in(p);
6507 }
6508 } 6748 }
6509 6749
6510 // Methods for the mutator alloc region 6750 // Methods for the mutator alloc region
6511 6751
6512 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, 6752 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6520 false /* is_old */, 6760 false /* is_old */,
6521 false /* do_expand */); 6761 false /* do_expand */);
6522 if (new_alloc_region != NULL) { 6762 if (new_alloc_region != NULL) {
6523 set_region_short_lived_locked(new_alloc_region); 6763 set_region_short_lived_locked(new_alloc_region);
6524 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full); 6764 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6765 check_bitmaps("Mutator Region Allocation", new_alloc_region);
6525 return new_alloc_region; 6766 return new_alloc_region;
6526 } 6767 }
6527 } 6768 }
6528 return NULL; 6769 return NULL;
6529 } 6770 }
6530 6771
6531 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, 6772 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6532 size_t allocated_bytes) { 6773 size_t allocated_bytes) {
6533 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 6774 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6534 assert(alloc_region->is_young(), "all mutator alloc regions should be young"); 6775 assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
6535 6776
6536 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); 6777 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6537 _summary_bytes_used += allocated_bytes; 6778 _allocator->increase_used(allocated_bytes);
6538 _hr_printer.retire(alloc_region); 6779 _hr_printer.retire(alloc_region);
6539 // We update the eden sizes here, when the region is retired, 6780 // We update the eden sizes here, when the region is retired,
6540 // instead of when it's allocated, since this is the point that its 6781 // instead of when it's allocated, since this is the point that its
6541 // used space has been recorded in _summary_bytes_used. 6782 // used space has been recorded in _summary_bytes_used.
6542 g1mm()->update_eden_size(); 6783 g1mm()->update_eden_size();
6543 }
6544
6545 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
6546 bool force) {
6547 return _g1h->new_mutator_alloc_region(word_size, force);
6548 } 6784 }
6549 6785
6550 void G1CollectedHeap::set_par_threads() { 6786 void G1CollectedHeap::set_par_threads() {
6551 // Don't change the number of workers. Use the value previously set 6787 // Don't change the number of workers. Use the value previously set
6552 // in the workgroup. 6788 // in the workgroup.
6561 workers()->set_active_workers(n_workers); 6797 workers()->set_active_workers(n_workers);
6562 } 6798 }
6563 set_par_threads(n_workers); 6799 set_par_threads(n_workers);
6564 } 6800 }
6565 6801
6566 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
6567 size_t allocated_bytes) {
6568 _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
6569 }
6570
6571 // Methods for the GC alloc regions 6802 // Methods for the GC alloc regions
6572 6803
6573 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, 6804 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6574 uint count, 6805 uint count,
6575 GCAllocPurpose ap) { 6806 GCAllocPurpose ap) {
6582 true /* do_expand */); 6813 true /* do_expand */);
6583 if (new_alloc_region != NULL) { 6814 if (new_alloc_region != NULL) {
6584 // We really only need to do this for old regions given that we 6815 // We really only need to do this for old regions given that we
6585 // should never scan survivors. But it doesn't hurt to do it 6816 // should never scan survivors. But it doesn't hurt to do it
6586 // for survivors too. 6817 // for survivors too.
6587 new_alloc_region->set_saved_mark(); 6818 new_alloc_region->record_top_and_timestamp();
6588 if (survivor) { 6819 if (survivor) {
6589 new_alloc_region->set_survivor(); 6820 new_alloc_region->set_survivor();
6590 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor); 6821 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6822 check_bitmaps("Survivor Region Allocation", new_alloc_region);
6591 } else { 6823 } else {
6824 new_alloc_region->set_old();
6592 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old); 6825 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6826 check_bitmaps("Old Region Allocation", new_alloc_region);
6593 } 6827 }
6594 bool during_im = g1_policy()->during_initial_mark_pause(); 6828 bool during_im = g1_policy()->during_initial_mark_pause();
6595 new_alloc_region->note_start_of_copying(during_im); 6829 new_alloc_region->note_start_of_copying(during_im);
6596 return new_alloc_region; 6830 return new_alloc_region;
6597 } else { 6831 } else {
6613 _old_set.add(alloc_region); 6847 _old_set.add(alloc_region);
6614 } 6848 }
6615 _hr_printer.retire(alloc_region); 6849 _hr_printer.retire(alloc_region);
6616 } 6850 }
6617 6851
6618 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
6619 bool force) {
6620 assert(!force, "not supported for GC alloc regions");
6621 return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
6622 }
6623
6624 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
6625 size_t allocated_bytes) {
6626 _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6627 GCAllocForSurvived);
6628 }
6629
6630 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
6631 bool force) {
6632 assert(!force, "not supported for GC alloc regions");
6633 return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
6634 }
6635
6636 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
6637 size_t allocated_bytes) {
6638 _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6639 GCAllocForTenured);
6640 }
6641 // Heap region set verification 6852 // Heap region set verification
6642 6853
6643 class VerifyRegionListsClosure : public HeapRegionClosure { 6854 class VerifyRegionListsClosure : public HeapRegionClosure {
6644 private: 6855 private:
6645 HeapRegionSet* _old_set; 6856 HeapRegionSet* _old_set;
6646 HeapRegionSet* _humongous_set; 6857 HeapRegionSet* _humongous_set;
6647 FreeRegionList* _free_list; 6858 HeapRegionManager* _hrm;
6648 6859
6649 public: 6860 public:
6650 HeapRegionSetCount _old_count; 6861 HeapRegionSetCount _old_count;
6651 HeapRegionSetCount _humongous_count; 6862 HeapRegionSetCount _humongous_count;
6652 HeapRegionSetCount _free_count; 6863 HeapRegionSetCount _free_count;
6653 6864
6654 VerifyRegionListsClosure(HeapRegionSet* old_set, 6865 VerifyRegionListsClosure(HeapRegionSet* old_set,
6655 HeapRegionSet* humongous_set, 6866 HeapRegionSet* humongous_set,
6656 FreeRegionList* free_list) : 6867 HeapRegionManager* hrm) :
6657 _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list), 6868 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6658 _old_count(), _humongous_count(), _free_count(){ } 6869 _old_count(), _humongous_count(), _free_count(){ }
6659 6870
6660 bool doHeapRegion(HeapRegion* hr) { 6871 bool doHeapRegion(HeapRegion* hr) {
6661 if (hr->continuesHumongous()) { 6872 if (hr->continuesHumongous()) {
6662 return false; 6873 return false;
6663 } 6874 }
6664 6875
6665 if (hr->is_young()) { 6876 if (hr->is_young()) {
6666 // TODO 6877 // TODO
6667 } else if (hr->startsHumongous()) { 6878 } else if (hr->startsHumongous()) {
6668 assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num())); 6879 assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
6669 _humongous_count.increment(1u, hr->capacity()); 6880 _humongous_count.increment(1u, hr->capacity());
6670 } else if (hr->is_empty()) { 6881 } else if (hr->is_empty()) {
6671 assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num())); 6882 assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
6672 _free_count.increment(1u, hr->capacity()); 6883 _free_count.increment(1u, hr->capacity());
6884 } else if (hr->is_old()) {
6885 assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
6886 _old_count.increment(1u, hr->capacity());
6673 } else { 6887 } else {
6674 assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num())); 6888 ShouldNotReachHere();
6675 _old_count.increment(1u, hr->capacity());
6676 } 6889 }
6677 return false; 6890 return false;
6678 } 6891 }
6679 6892
6680 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) { 6893 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6681 guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length())); 6894 guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
6682 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 6895 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6683 old_set->total_capacity_bytes(), _old_count.capacity())); 6896 old_set->total_capacity_bytes(), _old_count.capacity()));
6684 6897
6685 guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length())); 6898 guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
6686 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 6899 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6687 humongous_set->total_capacity_bytes(), _humongous_count.capacity())); 6900 humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
6688 6901
6689 guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length())); 6902 guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
6690 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 6903 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6691 free_list->total_capacity_bytes(), _free_count.capacity())); 6904 free_list->total_capacity_bytes(), _free_count.capacity()));
6692 } 6905 }
6693 }; 6906 };
6694 6907
6695 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6696 HeapWord* bottom) {
6697 HeapWord* end = bottom + HeapRegion::GrainWords;
6698 MemRegion mr(bottom, end);
6699 assert(_g1_reserved.contains(mr), "invariant");
6700 // This might return NULL if the allocation fails
6701 return new HeapRegion(hrs_index, _bot_shared, mr);
6702 }
6703
6704 void G1CollectedHeap::verify_region_sets() { 6908 void G1CollectedHeap::verify_region_sets() {
6705 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 6909 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6706 6910
6707 // First, check the explicit lists. 6911 // First, check the explicit lists.
6708 _free_list.verify_list(); 6912 _hrm.verify();
6709 { 6913 {
6710 // Given that a concurrent operation might be adding regions to 6914 // Given that a concurrent operation might be adding regions to
6711 // the secondary free list we have to take the lock before 6915 // the secondary free list we have to take the lock before
6712 // verifying it. 6916 // verifying it.
6713 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 6917 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6734 append_secondary_free_list_if_not_empty_with_lock(); 6938 append_secondary_free_list_if_not_empty_with_lock();
6735 6939
6736 // Finally, make sure that the region accounting in the lists is 6940 // Finally, make sure that the region accounting in the lists is
6737 // consistent with what we see in the heap. 6941 // consistent with what we see in the heap.
6738 6942
6739 VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list); 6943 VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
6740 heap_region_iterate(&cl); 6944 heap_region_iterate(&cl);
6741 cl.verify_counts(&_old_set, &_humongous_set, &_free_list); 6945 cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
6742 } 6946 }
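// Editor's note: a minimal sketch, not part of this changeset, of the locking
// idiom used above whenever the secondary free list is inspected: the lock is
// taken with _no_safepoint_check_flag because the concurrent marking thread can
// also hold it outside a safepoint. The verify_list() call is shown only for
// illustration.
//
//   {
//     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
//     _secondary_free_list.verify_list();
//   }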
6743 6947
6744 // Optimized nmethod scanning 6948 // Optimized nmethod scanning
6745 6949
6746 class RegisterNMethodOopClosure: public OopClosure { 6950 class RegisterNMethodOopClosure: public OopClosure {
6755 assert(!hr->continuesHumongous(), 6959 assert(!hr->continuesHumongous(),
6756 err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT 6960 err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
6757 " starting at "HR_FORMAT, 6961 " starting at "HR_FORMAT,
6758 _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()))); 6962 _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6759 6963
6760 // HeapRegion::add_strong_code_root() avoids adding duplicate 6964 // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
6761 // entries but having duplicates is OK since we "mark" nmethods 6965 hr->add_strong_code_root_locked(_nm);
6762 // as visited when we scan the strong code root lists during the GC.
6763 hr->add_strong_code_root(_nm);
6764 assert(hr->rem_set()->strong_code_roots_list_contains(_nm),
6765 err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT,
6766 _nm, HR_FORMAT_PARAMS(hr)));
6767 } 6966 }
6768 } 6967 }
6769 6968
6770 public: 6969 public:
6771 RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) : 6970 RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6788 err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT 6987 err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
6789 " starting at "HR_FORMAT, 6988 " starting at "HR_FORMAT,
6790 _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()))); 6989 _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6791 6990
6792 hr->remove_strong_code_root(_nm); 6991 hr->remove_strong_code_root(_nm);
6793 assert(!hr->rem_set()->strong_code_roots_list_contains(_nm),
6794 err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT,
6795 _nm, HR_FORMAT_PARAMS(hr)));
6796 } 6992 }
6797 } 6993 }
6798 6994
6799 public: 6995 public:
6800 UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) : 6996 UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6818 guarantee(nm != NULL, "sanity"); 7014 guarantee(nm != NULL, "sanity");
6819 UnregisterNMethodOopClosure reg_cl(this, nm); 7015 UnregisterNMethodOopClosure reg_cl(this, nm);
6820 nm->oops_do(&reg_cl, true); 7016 nm->oops_do(&reg_cl, true);
6821 } 7017 }
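// Editor's note: a hedged caller-side sketch (assumed, not shown in this
// changeset) of how these hooks are driven: the code cache registers an nmethod
// when it is created and unregisters it when it is flushed, so every oop embedded
// in the nmethod is mirrored into the owning region's strong-code-root set.
//
//   nmethod* nm = ...;                         // freshly created nmethod
//   Universe::heap()->register_nmethod(nm);    // RegisterNMethodOopClosure walks nm->oops_do()
//   ...
//   Universe::heap()->unregister_nmethod(nm);  // on unloading/flushing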
6822 7018
6823 class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
6824 public:
6825 bool doHeapRegion(HeapRegion *hr) {
6826 assert(!hr->isHumongous(),
6827 err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
6828 HR_FORMAT_PARAMS(hr)));
6829 hr->migrate_strong_code_roots();
6830 return false;
6831 }
6832 };
6833
6834 void G1CollectedHeap::migrate_strong_code_roots() {
6835 MigrateCodeRootsHeapRegionClosure cl;
6836 double migrate_start = os::elapsedTime();
6837 collection_set_iterate(&cl);
6838 double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
6839 g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
6840 }
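// Editor's note: the removed function above follows the usual G1 phase-timing
// idiom; a minimal sketch of that idiom, with a hypothetical recorder name:
//
//   double start = os::elapsedTime();
//   collection_set_iterate(&cl);                          // the work being timed
//   double ms = (os::elapsedTime() - start) * 1000.0;     // seconds -> milliseconds
//   g1_policy()->phase_times()->record_some_phase(ms);    // record_some_phase is illustrative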
6841
6842 void G1CollectedHeap::purge_code_root_memory() { 7019 void G1CollectedHeap::purge_code_root_memory() {
6843 double purge_start = os::elapsedTime(); 7020 double purge_start = os::elapsedTime();
6844 G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent); 7021 G1CodeRootSet::purge();
6845 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0; 7022 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
6846 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms); 7023 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
6847 }
6848
6849 // Mark all the code roots that point into regions *not* in the
6850 // collection set.
6851 //
6852 // Note we do not want to use a "marking" CodeBlobToOopClosure while
6853 // walking the code roots lists of regions not in the collection
6854 // set. Suppose we have an nmethod (M) that points to objects in two
6855 // separate regions - one in the collection set (R1) and one not (R2).
6856 // Using a "marking" CodeBlobToOopClosure here would result in "marking"
6857 // nmethod M when walking the code roots for R1. When we come to scan
6858 // the code roots for R2, we would see that M is already marked and it
6859 // would be skipped and the objects in R2 that are referenced from M
6860 // would not be evacuated.
6861
6862 class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
6863
6864 class MarkStrongCodeRootOopClosure: public OopClosure {
6865 ConcurrentMark* _cm;
6866 HeapRegion* _hr;
6867 uint _worker_id;
6868
6869 template <class T> void do_oop_work(T* p) {
6870 T heap_oop = oopDesc::load_heap_oop(p);
6871 if (!oopDesc::is_null(heap_oop)) {
6872 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6873 // Only mark objects in the region (which is assumed
6874 // not to be in the collection set).
6875 if (_hr->is_in(obj)) {
6876 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
6877 }
6878 }
6879 }
6880
6881 public:
6882 MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
6883 _cm(cm), _hr(hr), _worker_id(worker_id) {
6884 assert(!_hr->in_collection_set(), "sanity");
6885 }
6886
6887 void do_oop(narrowOop* p) { do_oop_work(p); }
6888 void do_oop(oop* p) { do_oop_work(p); }
6889 };
6890
6891 MarkStrongCodeRootOopClosure _oop_cl;
6892
6893 public:
6894 MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
6895 _oop_cl(cm, hr, worker_id) {}
6896
6897 void do_code_blob(CodeBlob* cb) {
6898 nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
6899 if (nm != NULL) {
6900 nm->oops_do(&_oop_cl);
6901 }
6902 }
6903 };
6904
6905 class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
6906 G1CollectedHeap* _g1h;
6907 uint _worker_id;
6908
6909 public:
6910 MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
6911 _g1h(g1h), _worker_id(worker_id) {}
6912
6913 bool doHeapRegion(HeapRegion *hr) {
6914 HeapRegionRemSet* hrrs = hr->rem_set();
6915 if (hr->continuesHumongous()) {
6916 // Code roots should never be attached to a continuation of a humongous region
6917 assert(hrrs->strong_code_roots_list_length() == 0,
6918 err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
6919 " starting at "HR_FORMAT", but has "SIZE_FORMAT,
6920 HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
6921 hrrs->strong_code_roots_list_length()));
6922 return false;
6923 }
6924
6925 if (hr->in_collection_set()) {
6926 // Don't mark code roots into regions in the collection set here.
6927 // They will be marked when we scan them.
6928 return false;
6929 }
6930
6931 MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
6932 hr->strong_code_roots_do(&cb_cl);
6933 return false;
6934 }
6935 };
6936
6937 void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
6938 MarkStrongCodeRootsHRClosure cl(this, worker_id);
6939 if (G1CollectedHeap::use_parallel_gc_threads()) {
6940 heap_region_par_iterate_chunked(&cl,
6941 worker_id,
6942 workers()->active_workers(),
6943 HeapRegion::ParMarkRootClaimValue);
6944 } else {
6945 heap_region_iterate(&cl);
6946 }
6947 } 7024 }
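// Editor's note: a hedged simplification (accessor names are hypothetical) of
// the claim-value mechanism used by heap_region_par_iterate_chunked() in the
// removed mark_strong_code_roots() above: each region carries a claim word, and
// a worker processes the region only if it wins the CAS from the previous claim
// value to the new one.
//
//   bool claim_region(HeapRegion* hr, jint claim_value) {
//     jint current = hr->claim_value();
//     return current != claim_value &&
//            Atomic::cmpxchg(claim_value, hr->claim_value_addr(), current) == current;
//   }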
6948 7025
6949 class RebuildStrongCodeRootClosure: public CodeBlobClosure { 7026 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
6950 G1CollectedHeap* _g1h; 7027 G1CollectedHeap* _g1h;
6951 7028
6957 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL; 7034 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
6958 if (nm == NULL) { 7035 if (nm == NULL) {
6959 return; 7036 return;
6960 } 7037 }
6961 7038
6962 if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) { 7039 if (ScavengeRootsInCode) {
6963 _g1h->register_nmethod(nm); 7040 _g1h->register_nmethod(nm);
6964 } 7041 }
6965 } 7042 }
6966 }; 7043 };
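// Editor's note: a hedged usage sketch; the rebuild driver (assumed to follow
// later in this file) walks the whole code cache with this closure so that every
// live nmethod re-registers its oops after a full GC.
//
//   RebuildStrongCodeRootClosure blob_cl(this);
//   CodeCache::blobs_do(&blob_cl);   // each nmethod ends up back in register_nmethod()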
6967 7044