comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 18041:52b4284cb496

Merge with jdk8u20-b26
author Gilles Duboscq <duboscq@ssw.jku.at>
date Wed, 15 Oct 2014 16:02:50 +0200
parents 4ca6dc0799b6 a45a4f5a9609
children 7848fc12602b
comparing 17606:45d7b2c7029d with 18041:52b4284cb496
1 /* 1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
20 * or visit www.oracle.com if you need additional information or have any 20 * or visit www.oracle.com if you need additional information or have any
21 * questions. 21 * questions.
22 * 22 *
23 */ 23 */
24 24
25 #if !defined(__clang_major__) && defined(__GNUC__)
26 #define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
27 #endif
28
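
A note on the new ATTRIBUTE_PRINTF guard above: defining the macro to nothing for non-clang GCC builds switches off printf-format checking for this file, since (per the FIXME) its format strings are not yet clean. The sketch below is not HotSpot's actual definition; it only illustrates, under that assumption, what such a macro does when it is left active.

// Minimal sketch (not HotSpot's definitions) of a printf-format attribute macro.
#include <cstdarg>
#include <cstdio>

#if defined(__GNUC__) || defined(__clang__)
#define ATTRIBUTE_PRINTF(x, y) __attribute__((format(printf, x, y)))
#else
#define ATTRIBUTE_PRINTF(x, y)
#endif

// With the attribute active, the compiler checks callers' arguments against fmt.
void log_line(const char* fmt, ...) ATTRIBUTE_PRINTF(1, 2);

void log_line(const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  vprintf(fmt, ap);
  va_end(ap);
}

int main() {
  log_line("heap used: %d regions\n", 42);   // would warn if %d mismatched the argument
  return 0;
}
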
25 #include "precompiled.hpp" 29 #include "precompiled.hpp"
26 #include "code/codeCache.hpp" 30 #include "code/codeCache.hpp"
27 #include "code/icBuffer.hpp" 31 #include "code/icBuffer.hpp"
28 #include "gc_implementation/g1/bufferingOopClosure.hpp" 32 #include "gc_implementation/g1/bufferingOopClosure.hpp"
29 #include "gc_implementation/g1/concurrentG1Refine.hpp" 33 #include "gc_implementation/g1/concurrentG1Refine.hpp"
37 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" 41 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
38 #include "gc_implementation/g1/g1Log.hpp" 42 #include "gc_implementation/g1/g1Log.hpp"
39 #include "gc_implementation/g1/g1MarkSweep.hpp" 43 #include "gc_implementation/g1/g1MarkSweep.hpp"
40 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 44 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
41 #include "gc_implementation/g1/g1RemSet.inline.hpp" 45 #include "gc_implementation/g1/g1RemSet.inline.hpp"
46 #include "gc_implementation/g1/g1StringDedup.hpp"
42 #include "gc_implementation/g1/g1YCTypes.hpp" 47 #include "gc_implementation/g1/g1YCTypes.hpp"
43 #include "gc_implementation/g1/heapRegion.inline.hpp" 48 #include "gc_implementation/g1/heapRegion.inline.hpp"
44 #include "gc_implementation/g1/heapRegionRemSet.hpp" 49 #include "gc_implementation/g1/heapRegionRemSet.hpp"
45 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" 50 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
46 #include "gc_implementation/g1/vm_operations_g1.hpp" 51 #include "gc_implementation/g1/vm_operations_g1.hpp"
48 #include "gc_implementation/shared/gcTimer.hpp" 53 #include "gc_implementation/shared/gcTimer.hpp"
49 #include "gc_implementation/shared/gcTrace.hpp" 54 #include "gc_implementation/shared/gcTrace.hpp"
50 #include "gc_implementation/shared/gcTraceTime.hpp" 55 #include "gc_implementation/shared/gcTraceTime.hpp"
51 #include "gc_implementation/shared/isGCActiveMark.hpp" 56 #include "gc_implementation/shared/isGCActiveMark.hpp"
52 #include "memory/gcLocker.inline.hpp" 57 #include "memory/gcLocker.inline.hpp"
53 #include "memory/genOopClosures.inline.hpp"
54 #include "memory/generationSpec.hpp" 58 #include "memory/generationSpec.hpp"
59 #include "memory/iterator.hpp"
55 #include "memory/referenceProcessor.hpp" 60 #include "memory/referenceProcessor.hpp"
56 #include "oops/oop.inline.hpp" 61 #include "oops/oop.inline.hpp"
57 #include "oops/oop.pcgc.inline.hpp" 62 #include "oops/oop.pcgc.inline.hpp"
58 #include "runtime/vmThread.hpp" 63 #include "runtime/vmThread.hpp"
59 #include "utilities/ticks.hpp" 64 #include "utilities/ticks.hpp"
99 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, 104 RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
100 G1RemSet* g1rs, 105 G1RemSet* g1rs,
101 ConcurrentG1Refine* cg1r) : 106 ConcurrentG1Refine* cg1r) :
102 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) 107 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
103 {} 108 {}
104 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 109 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
105 bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false); 110 bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
106 // This path is executed by the concurrent refine or mutator threads, 111 // This path is executed by the concurrent refine or mutator threads,
107 // concurrently, and so we do not care if card_ptr contains references 112 // concurrently, and so we do not care if card_ptr contains references
108 // that point into the collection set. 113 // that point into the collection set.
109 assert(!oops_into_cset, "should be"); 114 assert(!oops_into_cset, "should be");
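
The hunks above change the worker id in do_card_ptr from int to uint across the card-table closures. As a rough illustration of the closure pattern these classes follow, here is a minimal sketch; the interface shape (do_card_ptr(jbyte*, uint) returning true to continue) is taken from the diff, everything else is an assumption.

// Assumed, simplified interface matching the do_card_ptr(jbyte*, uint) shape above.
#include <cstddef>

typedef signed char jbyte;   // assumption: a card is a signed 8-bit value

class CardTableEntryClosure {
public:
  virtual ~CardTableEntryClosure() {}
  // Return true to keep iterating over the dirty card queue, as the closures above do.
  virtual bool do_card_ptr(jbyte* card_ptr, unsigned int worker_i) = 0;
};

// Example: count the cards a worker visits.
class CountCardsClosure : public CardTableEntryClosure {
  size_t _calls;
public:
  CountCardsClosure() : _calls(0) {}
  virtual bool do_card_ptr(jbyte* /*card_ptr*/, unsigned int /*worker_i*/) {
    _calls++;
    return true;
  }
  size_t calls() const { return _calls; }
};
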
128 ClearLoggedCardTableEntryClosure() : 133 ClearLoggedCardTableEntryClosure() :
129 _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) 134 _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
130 { 135 {
131 for (int i = 0; i < 256; i++) _histo[i] = 0; 136 for (int i = 0; i < 256; i++) _histo[i] = 0;
132 } 137 }
133 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 138 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
134 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 139 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
135 _calls++; 140 _calls++;
136 unsigned char* ujb = (unsigned char*)card_ptr; 141 unsigned char* ujb = (unsigned char*)card_ptr;
137 int ind = (int)(*ujb); 142 int ind = (int)(*ujb);
138 _histo[ind]++; 143 _histo[ind]++;
157 CardTableModRefBS* _ctbs; 162 CardTableModRefBS* _ctbs;
158 public: 163 public:
159 RedirtyLoggedCardTableEntryClosure() : 164 RedirtyLoggedCardTableEntryClosure() :
160 _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {} 165 _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
161 166
162 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 167 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
163 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 168 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
164 _calls++; 169 _calls++;
165 *card_ptr = 0; 170 *card_ptr = 0;
166 } 171 }
167 return true; 172 return true;
168 } 173 }
169 int calls() { return _calls; } 174 int calls() { return _calls; }
170 };
171
172 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
173 public:
174 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
175 *card_ptr = CardTableModRefBS::dirty_card_val();
176 return true;
177 }
178 }; 175 };
179 176
180 YoungList::YoungList(G1CollectedHeap* g1h) : 177 YoungList::YoungList(G1CollectedHeap* g1h) :
181 _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0), 178 _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
182 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) { 179 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
377 curr->age_in_surv_rate_group_cond()); 374 curr->age_in_surv_rate_group_cond());
378 curr = curr->get_next_young_region(); 375 curr = curr->get_next_young_region();
379 } 376 }
380 } 377 }
381 378
382 gclog_or_tty->print_cr(""); 379 gclog_or_tty->cr();
383 } 380 }
384 381
385 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) 382 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
386 { 383 {
387 // Claim the right to put the region on the dirty cards region list 384 // Claim the right to put the region on the dirty cards region list
433 assert(hr != NULL, "invariant"); 430 assert(hr != NULL, "invariant");
434 hr->set_next_dirty_cards_region(NULL); 431 hr->set_next_dirty_cards_region(NULL);
435 return hr; 432 return hr;
436 } 433 }
437 434
438 void G1CollectedHeap::stop_conc_gc_threads() {
439 _cg1r->stop();
440 _cmThread->stop();
441 }
442
443 #ifdef ASSERT 435 #ifdef ASSERT
444 // A region is added to the collection set as it is retired 436 // A region is added to the collection set as it is retired
445 // so an address p can point to a region which will be in the 437 // so an address p can point to a region which will be in the
446 // collection set but has not yet been retired. This method 438 // collection set but has not yet been retired. This method
447 // therefore is only accurate during a GC pause after all 439 // therefore is only accurate during a GC pause after all
522 G1CollectedHeap* G1CollectedHeap::_g1h; 514 G1CollectedHeap* G1CollectedHeap::_g1h;
523 515
524 // Private methods. 516 // Private methods.
525 517
526 HeapRegion* 518 HeapRegion*
527 G1CollectedHeap::new_region_try_secondary_free_list() { 519 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
528 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 520 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
529 while (!_secondary_free_list.is_empty() || free_regions_coming()) { 521 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
530 if (!_secondary_free_list.is_empty()) { 522 if (!_secondary_free_list.is_empty()) {
531 if (G1ConcRegionFreeingVerbose) { 523 if (G1ConcRegionFreeingVerbose) {
532 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 524 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
538 // again to allocate from it. 530 // again to allocate from it.
539 append_secondary_free_list(); 531 append_secondary_free_list();
540 532
541 assert(!_free_list.is_empty(), "if the secondary_free_list was not " 533 assert(!_free_list.is_empty(), "if the secondary_free_list was not "
542 "empty we should have moved at least one entry to the free_list"); 534 "empty we should have moved at least one entry to the free_list");
543 HeapRegion* res = _free_list.remove_head(); 535 HeapRegion* res = _free_list.remove_region(is_old);
544 if (G1ConcRegionFreeingVerbose) { 536 if (G1ConcRegionFreeingVerbose) {
545 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 537 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
546 "allocated "HR_FORMAT" from secondary_free_list", 538 "allocated "HR_FORMAT" from secondary_free_list",
547 HR_FORMAT_PARAMS(res)); 539 HR_FORMAT_PARAMS(res));
548 } 540 }
560 "could not allocate from secondary_free_list"); 552 "could not allocate from secondary_free_list");
561 } 553 }
562 return NULL; 554 return NULL;
563 } 555 }
564 556
565 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) { 557 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
566 assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords, 558 assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
567 "the only time we use this to allocate a humongous region is " 559 "the only time we use this to allocate a humongous region is "
568 "when we are allocating a single humongous region"); 560 "when we are allocating a single humongous region");
569 561
570 HeapRegion* res; 562 HeapRegion* res;
572 if (!_secondary_free_list.is_empty()) { 564 if (!_secondary_free_list.is_empty()) {
573 if (G1ConcRegionFreeingVerbose) { 565 if (G1ConcRegionFreeingVerbose) {
574 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 566 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
575 "forced to look at the secondary_free_list"); 567 "forced to look at the secondary_free_list");
576 } 568 }
577 res = new_region_try_secondary_free_list(); 569 res = new_region_try_secondary_free_list(is_old);
578 if (res != NULL) { 570 if (res != NULL) {
579 return res; 571 return res;
580 } 572 }
581 } 573 }
582 } 574 }
583 res = _free_list.remove_head_or_null(); 575
576 res = _free_list.remove_region(is_old);
577
584 if (res == NULL) { 578 if (res == NULL) {
585 if (G1ConcRegionFreeingVerbose) { 579 if (G1ConcRegionFreeingVerbose) {
586 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 580 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
587 "res == NULL, trying the secondary_free_list"); 581 "res == NULL, trying the secondary_free_list");
588 } 582 }
589 res = new_region_try_secondary_free_list(); 583 res = new_region_try_secondary_free_list(is_old);
590 } 584 }
591 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) { 585 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
592 // Currently, only attempts to allocate GC alloc regions set 586 // Currently, only attempts to allocate GC alloc regions set
593 // do_expand to true. So, we should only reach here during a 587 // do_expand to true. So, we should only reach here during a
594 // safepoint. If this assumption changes we might have to 588 // safepoint. If this assumption changes we might have to
601 ergo_format_byte("allocation request"), 595 ergo_format_byte("allocation request"),
602 word_size * HeapWordSize); 596 word_size * HeapWordSize);
603 if (expand(word_size * HeapWordSize)) { 597 if (expand(word_size * HeapWordSize)) {
604 // Given that expand() succeeded in expanding the heap, and we 598 // Given that expand() succeeded in expanding the heap, and we
605 // always expand the heap by an amount aligned to the heap 599 // always expand the heap by an amount aligned to the heap
606 // region size, the free list should in theory not be empty. So 600 // region size, the free list should in theory not be empty.
607 // it would probably be OK to use remove_head(). But the extra 601 // In either case remove_region() will check for NULL.
608 // check for NULL is unlikely to be a performance issue here (we 602 res = _free_list.remove_region(is_old);
609 // just expanded the heap!) so let's just be conservative and
610 // use remove_head_or_null().
611 res = _free_list.remove_head_or_null();
612 } else { 603 } else {
613 _expand_heap_after_alloc_failure = false; 604 _expand_heap_after_alloc_failure = false;
614 } 605 }
615 } 606 }
616 return res; 607 return res;
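
The new_region() changes above thread an is_old hint through every free-list removal (remove_region(is_old) replaces remove_head()/remove_head_or_null()). The sketch below is purely illustrative of that shape; the actual selection policy lives in FreeRegionList and is not part of this hunk.

// Hypothetical free list where a single flag steers which end to take from;
// callers above always check the result for NULL, as the comment notes.
#include <list>

struct Region { int index; };

class FreeList {
  std::list<Region*> _regions;
public:
  void add(Region* r) { _regions.push_back(r); }
  Region* remove_region(bool is_old) {
    if (_regions.empty()) return nullptr;
    Region* r;
    if (is_old) { r = _regions.back();  _regions.pop_back();  }   // hypothetical policy
    else        { r = _regions.front(); _regions.pop_front(); }
    return r;
  }
};
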
624 uint first = G1_NULL_HRS_INDEX; 615 uint first = G1_NULL_HRS_INDEX;
625 if (num_regions == 1) { 616 if (num_regions == 1) {
626 // Only one region to allocate, no need to go through the slower 617 // Only one region to allocate, no need to go through the slower
627 // path. The caller will attempt the expansion if this fails, so 618 // path. The caller will attempt the expansion if this fails, so
628 // let's not try to expand here too. 619 // let's not try to expand here too.
629 HeapRegion* hr = new_region(word_size, false /* do_expand */); 620 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
630 if (hr != NULL) { 621 if (hr != NULL) {
631 first = hr->hrs_index(); 622 first = hr->hrs_index();
632 } else { 623 } else {
633 first = G1_NULL_HRS_INDEX; 624 first = G1_NULL_HRS_INDEX;
634 } 625 }
1294 ResourceMark rm; 1285 ResourceMark rm;
1295 1286
1296 print_heap_before_gc(); 1287 print_heap_before_gc();
1297 trace_heap_before_gc(gc_tracer); 1288 trace_heap_before_gc(gc_tracer);
1298 1289
1299 size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); 1290 size_t metadata_prev_used = MetaspaceAux::used_bytes();
1300 1291
1301 HRSPhaseSetter x(HRSPhaseFullGC);
1302 verify_region_sets_optional(); 1292 verify_region_sets_optional();
1303 1293
1304 const bool do_clear_all_soft_refs = clear_all_soft_refs || 1294 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1305 collector_policy()->should_clear_all_soft_refs(); 1295 collector_policy()->should_clear_all_soft_refs();
1306 1296
1573 1563
1574 // This code is mostly copied from TenuredGeneration. 1564 // This code is mostly copied from TenuredGeneration.
1575 void 1565 void
1576 G1CollectedHeap:: 1566 G1CollectedHeap::
1577 resize_if_necessary_after_full_collection(size_t word_size) { 1567 resize_if_necessary_after_full_collection(size_t word_size) {
1578 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
1579
1580 // Include the current allocation, if any, and bytes that will be 1568 // Include the current allocation, if any, and bytes that will be
1581 // pre-allocated to support collections, as "used". 1569 // pre-allocated to support collections, as "used".
1582 const size_t used_after_gc = used(); 1570 const size_t used_after_gc = used();
1583 const size_t capacity_after_gc = capacity(); 1571 const size_t capacity_after_gc = capacity();
1584 const size_t free_after_gc = capacity_after_gc - used_after_gc; 1572 const size_t free_after_gc = capacity_after_gc - used_after_gc;
1928 _mark_in_progress(false), 1916 _mark_in_progress(false),
1929 _cg1r(NULL), _summary_bytes_used(0), 1917 _cg1r(NULL), _summary_bytes_used(0),
1930 _g1mm(NULL), 1918 _g1mm(NULL),
1931 _refine_cte_cl(NULL), 1919 _refine_cte_cl(NULL),
1932 _full_collection(false), 1920 _full_collection(false),
1933 _free_list("Master Free List"), 1921 _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
1934 _secondary_free_list("Secondary Free List"), 1922 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1935 _old_set("Old Set"), 1923 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1936 _humongous_set("Master Humongous Set"), 1924 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1937 _free_regions_coming(false), 1925 _free_regions_coming(false),
1938 _young_list(new YoungList(this)), 1926 _young_list(new YoungList(this)),
1939 _gc_time_stamp(0), 1927 _gc_time_stamp(0),
1940 _retained_old_gc_alloc_region(NULL), 1928 _retained_old_gc_alloc_region(NULL),
1941 _survivor_plab_stats(YoungPLABSize, PLABWeight), 1929 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1963 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; 1951 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1964 1952
1965 int n_queues = MAX2((int)ParallelGCThreads, 1); 1953 int n_queues = MAX2((int)ParallelGCThreads, 1);
1966 _task_queues = new RefToScanQueueSet(n_queues); 1954 _task_queues = new RefToScanQueueSet(n_queues);
1967 1955
1968 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); 1956 uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1969 assert(n_rem_sets > 0, "Invariant."); 1957 assert(n_rem_sets > 0, "Invariant.");
1970 1958
1971 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); 1959 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1972 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); 1960 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1973 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC); 1961 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
2079 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; 2067 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2080 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); 2068 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2081 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region, 2069 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2082 "too many cards per region"); 2070 "too many cards per region");
2083 2071
2084 HeapRegionSet::set_unrealistically_long_length(max_regions() + 1); 2072 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2085 2073
2086 _bot_shared = new G1BlockOffsetSharedArray(_reserved, 2074 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2087 heap_word_size(init_byte_size)); 2075 heap_word_size(init_byte_size));
2088 2076
2089 _g1h = this; 2077 _g1h = this;
2182 2170
2183 // Do create of the monitoring and management support so that 2171 // Do create of the monitoring and management support so that
2184 // values in the heap have been properly initialized. 2172 // values in the heap have been properly initialized.
2185 _g1mm = new G1MonitoringSupport(this); 2173 _g1mm = new G1MonitoringSupport(this);
2186 2174
2175 G1StringDedup::initialize();
2176
2187 return JNI_OK; 2177 return JNI_OK;
2178 }
2179
2180 void G1CollectedHeap::stop() {
2181 // Stop all concurrent threads. We do this to make sure these threads
2182 // do not continue to execute and access resources (e.g. gclog_or_tty)
2183 // that are destroyed during shutdown.
2184 _cg1r->stop();
2185 _cmThread->stop();
2186 if (G1StringDedup::is_enabled()) {
2187 G1StringDedup::stop();
2188 }
2188 } 2189 }
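
The new stop() method replaces stop_conc_gc_threads() and runs at shutdown so the refinement, marking, and (optional) string-dedup threads stop before resources such as gclog_or_tty are destroyed. A generic sketch of that pattern, using std::thread rather than HotSpot's thread classes:

#include <atomic>
#include <thread>

class ConcurrentHelper {
  std::atomic<bool> _should_stop{false};
  std::thread _thread;
public:
  void start() {
    _thread = std::thread([this] {
      while (!_should_stop.load()) {
        // concurrent work (refinement, marking) would go here
      }
    });
  }
  void stop() {                       // called before shared resources are torn down
    _should_stop.store(true);
    if (_thread.joinable()) _thread.join();
  }
};

int main() {
  ConcurrentHelper refine, mark;
  refine.start(); mark.start();
  // ... at VM shutdown, stop concurrent threads first:
  refine.stop();
  mark.stop();
  return 0;
}
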
2189 2190
2190 size_t G1CollectedHeap::conservative_max_heap_alignment() { 2191 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2191 return HeapRegion::max_region_size(); 2192 return HeapRegion::max_region_size();
2192 } 2193 }
2241 // mt discovery 2242 // mt discovery
2242 (int) MAX2(ParallelGCThreads, ConcGCThreads), 2243 (int) MAX2(ParallelGCThreads, ConcGCThreads),
2243 // degree of mt discovery 2244 // degree of mt discovery
2244 false, 2245 false,
2245 // Reference discovery is not atomic 2246 // Reference discovery is not atomic
2246 &_is_alive_closure_cm, 2247 &_is_alive_closure_cm);
2247 // is alive closure 2248 // is alive closure
2248 // (for efficiency/performance) 2249 // (for efficiency/performance)
2249 true);
2250 // Setting next fields of discovered
2251 // lists requires a barrier.
2252 2250
2253 // STW ref processor 2251 // STW ref processor
2254 _ref_processor_stw = 2252 _ref_processor_stw =
2255 new ReferenceProcessor(mr, // span 2253 new ReferenceProcessor(mr, // span
2256 ParallelRefProcEnabled && (ParallelGCThreads > 1), 2254 ParallelRefProcEnabled && (ParallelGCThreads > 1),
2261 // mt discovery 2259 // mt discovery
2262 MAX2((int)ParallelGCThreads, 1), 2260 MAX2((int)ParallelGCThreads, 1),
2263 // degree of mt discovery 2261 // degree of mt discovery
2264 true, 2262 true,
2265 // Reference discovery is atomic 2263 // Reference discovery is atomic
2266 &_is_alive_closure_stw, 2264 &_is_alive_closure_stw);
2267 // is alive closure 2265 // is alive closure
2268 // (for efficiency/performance) 2266 // (for efficiency/performance)
2269 false);
2270 // Setting next fields of discovered
2271 // lists requires a barrier.
2272 } 2267 }
2273 2268
2274 size_t G1CollectedHeap::capacity() const { 2269 size_t G1CollectedHeap::capacity() const {
2275 return _g1_committed.byte_size(); 2270 return _g1_committed.byte_size();
2276 } 2271 }
2321 #endif // PRODUCT 2316 #endif // PRODUCT
2322 2317
2323 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, 2318 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2324 DirtyCardQueue* into_cset_dcq, 2319 DirtyCardQueue* into_cset_dcq,
2325 bool concurrent, 2320 bool concurrent,
2326 int worker_i) { 2321 uint worker_i) {
2327 // Clean cards in the hot card cache 2322 // Clean cards in the hot card cache
2328 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); 2323 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2329 hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq); 2324 hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2330 2325
2331 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 2326 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2369 } 2364 }
2370 size_t result() { return _used; } 2365 size_t result() { return _used; }
2371 }; 2366 };
2372 2367
2373 size_t G1CollectedHeap::recalculate_used() const { 2368 size_t G1CollectedHeap::recalculate_used() const {
2369 double recalculate_used_start = os::elapsedTime();
2370
2374 SumUsedClosure blk; 2371 SumUsedClosure blk;
2375 heap_region_iterate(&blk); 2372 heap_region_iterate(&blk);
2373
2374 g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2376 return blk.result(); 2375 return blk.result();
2377 } 2376 }
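
recalculate_used() is now timed and reported via record_evac_fail_recalc_used_time(). os::elapsedTime() returns seconds, so the delta is multiplied by 1000.0 to record milliseconds. A portable sketch of the same idiom (names here are illustrative, not HotSpot APIs):

#include <chrono>
#include <cstdio>

// Stand-in for os::elapsedTime(): wall-clock seconds as a double.
static double elapsed_time() {
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

int main() {
  double start = elapsed_time();
  // ... the work being measured (heap_region_iterate(&blk) in the diff) ...
  double ms = (elapsed_time() - start) * 1000.0;   // same seconds-to-milliseconds conversion
  printf("phase took %.3f ms\n", ms);
  return 0;
}
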
2378 2377
2379 size_t G1CollectedHeap::unsafe_max_alloc() { 2378 size_t G1CollectedHeap::unsafe_max_alloc() {
2380 if (free_regions() > 0) return HeapRegion::GrainBytes; 2379 if (free_regions() > 0) return HeapRegion::GrainBytes;
2875 } 2874 }
2876 } 2875 }
2877 2876
2878 // Given the id of a worker, obtain or calculate a suitable 2877 // Given the id of a worker, obtain or calculate a suitable
2879 // starting region for iterating over the current collection set. 2878 // starting region for iterating over the current collection set.
2880 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) { 2879 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
2881 assert(get_gc_time_stamp() > 0, "should have been updated by now"); 2880 assert(get_gc_time_stamp() > 0, "should have been updated by now");
2882 2881
2883 HeapRegion* result = NULL; 2882 HeapRegion* result = NULL;
2884 unsigned gc_time_stamp = get_gc_time_stamp(); 2883 unsigned gc_time_stamp = get_gc_time_stamp();
2885 2884
3023 bool G1CollectedHeap::supports_tlab_allocation() const { 3022 bool G1CollectedHeap::supports_tlab_allocation() const {
3024 return true; 3023 return true;
3025 } 3024 }
3026 3025
3027 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { 3026 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
3028 return HeapRegion::GrainBytes; 3027 return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
3028 }
3029
3030 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
3031 return young_list()->eden_used_bytes();
3032 }
3033
3034 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
3035 // must be smaller than the humongous object limit.
3036 size_t G1CollectedHeap::max_tlab_size() const {
3037 return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
3029 } 3038 }
3030 3039
3031 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { 3040 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
3032 // Return the remaining space in the cur alloc region, but not less than 3041 // Return the remaining space in the cur alloc region, but not less than
3033 // the min TLAB size. 3042 // the min TLAB size.
3035 // Also, this value can be at most the humongous object threshold, 3044 // Also, this value can be at most the humongous object threshold,
3036 // since we can't allow tlabs to grow big enough to accommodate 3045 // since we can't allow tlabs to grow big enough to accommodate
3037 // humongous objects. 3046 // humongous objects.
3038 3047
3039 HeapRegion* hr = _mutator_alloc_region.get(); 3048 HeapRegion* hr = _mutator_alloc_region.get();
3040 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; 3049 size_t max_tlab = max_tlab_size() * wordSize;
3041 if (hr == NULL) { 3050 if (hr == NULL) {
3042 return max_tlab_size; 3051 return max_tlab;
3043 } else { 3052 } else {
3044 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size); 3053 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
3045 } 3054 }
3046 } 3055 }
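
The TLAB changes above bound TLABs by the humongous-object threshold and report capacity as the non-survivor part of the young target. A small worked example under assumed values (1 MiB regions, 8-byte words, MinObjAlignment of one word, a young target of 100 regions with 10 survivors):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t HeapWordSize = 8;                            // assumed 64-bit word
  const size_t GrainBytes   = 1024 * 1024;                  // assumed 1 MiB region
  const size_t GrainWords   = GrainBytes / HeapWordSize;
  const size_t humongous_threshold_words = GrainWords / 2;  // as in the constructor hunk
  const size_t MinObjAlignment = 1;                         // assumed: one word

  const size_t young_target = 100, survivors = 10;          // assumed policy values

  // tlab_capacity(): non-survivor part of the young target, in bytes
  size_t tlab_capacity = (young_target - survivors) * GrainBytes;

  // max_tlab_size(): align_size_down(threshold - 1, MinObjAlignment), in words
  size_t max_tlab_words = ((humongous_threshold_words - 1) / MinObjAlignment) * MinObjAlignment;

  printf("tlab capacity : %zu bytes (90 regions)\n", tlab_capacity);
  printf("max TLAB size : %zu words (just under half a region)\n", max_tlab_words);
  return 0;
}
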
3047 3056
3048 size_t G1CollectedHeap::max_capacity() const { 3057 size_t G1CollectedHeap::max_capacity() const {
3049 return _g1_reserved.byte_size(); 3058 return _g1_reserved.byte_size();
3104 default: ShouldNotReachHere(); 3113 default: ShouldNotReachHere();
3105 } 3114 }
3106 return NULL; // keep some compilers happy 3115 return NULL; // keep some compilers happy
3107 } 3116 }
3108 3117
3109 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can 3118 class VerifyRootsClosure: public OopClosure {
3110 // pass it as the perm_blk to SharedHeap::process_strong_roots.
3111 // When process_strong_roots stop calling perm_blk->younger_refs_iterate
3112 // we can change this closure to extend the simpler OopClosure.
3113 class VerifyRootsClosure: public OopsInGenClosure {
3114 private: 3119 private:
3115 G1CollectedHeap* _g1h; 3120 G1CollectedHeap* _g1h;
3116 VerifyOption _vo; 3121 VerifyOption _vo;
3117 bool _failures; 3122 bool _failures;
3118 public: 3123 public:
3144 3149
3145 void do_oop(oop* p) { do_oop_nv(p); } 3150 void do_oop(oop* p) { do_oop_nv(p); }
3146 void do_oop(narrowOop* p) { do_oop_nv(p); } 3151 void do_oop(narrowOop* p) { do_oop_nv(p); }
3147 }; 3152 };
3148 3153
3149 class G1VerifyCodeRootOopClosure: public OopsInGenClosure { 3154 class G1VerifyCodeRootOopClosure: public OopClosure {
3150 G1CollectedHeap* _g1h; 3155 G1CollectedHeap* _g1h;
3151 OopClosure* _root_cl; 3156 OopClosure* _root_cl;
3152 nmethod* _nm; 3157 nmethod* _nm;
3153 VerifyOption _vo; 3158 VerifyOption _vo;
3154 bool _failures; 3159 bool _failures;
3484 } 3489 }
3485 } 3490 }
3486 if (!silent) gclog_or_tty->print("RemSet "); 3491 if (!silent) gclog_or_tty->print("RemSet ");
3487 rem_set()->verify(); 3492 rem_set()->verify();
3488 3493
3494 if (G1StringDedup::is_enabled()) {
3495 if (!silent) gclog_or_tty->print("StrDedup ");
3496 G1StringDedup::verify();
3497 }
3498
3489 if (failures) { 3499 if (failures) {
3490 gclog_or_tty->print_cr("Heap:"); 3500 gclog_or_tty->print_cr("Heap:");
3491 // It helps to have the per-region information in the output to 3501 // It helps to have the per-region information in the output to
3492 // help us track down what went wrong. This is why we call 3502 // help us track down what went wrong. This is why we call
3493 // print_extended_on() instead of print_on(). 3503 // print_extended_on() instead of print_on().
3494 print_extended_on(gclog_or_tty); 3504 print_extended_on(gclog_or_tty);
3495 gclog_or_tty->print_cr(""); 3505 gclog_or_tty->cr();
3496 #ifndef PRODUCT 3506 #ifndef PRODUCT
3497 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { 3507 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3498 concurrent_mark()->print_reachable("at-verification-failure", 3508 concurrent_mark()->print_reachable("at-verification-failure",
3499 vo, false /* all */); 3509 vo, false /* all */);
3500 } 3510 }
3501 #endif 3511 #endif
3502 gclog_or_tty->flush(); 3512 gclog_or_tty->flush();
3503 } 3513 }
3504 guarantee(!failures, "there should not have been any failures"); 3514 guarantee(!failures, "there should not have been any failures");
3505 } else { 3515 } else {
3506 if (!silent) 3516 if (!silent) {
3507 gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) "); 3517 gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
3518 if (G1StringDedup::is_enabled()) {
3519 gclog_or_tty->print(", StrDedup");
3520 }
3521 gclog_or_tty->print(") ");
3522 }
3508 } 3523 }
3509 } 3524 }
3510 3525
3511 void G1CollectedHeap::verify(bool silent) { 3526 void G1CollectedHeap::verify(bool silent) {
3512 verify(silent, VerifyOption_G1UsePrevMarking); 3527 verify(silent, VerifyOption_G1UsePrevMarking);
3543 bool doHeapRegion(HeapRegion* r) { 3558 bool doHeapRegion(HeapRegion* r) {
3544 r->print_on(_st); 3559 r->print_on(_st);
3545 return false; 3560 return false;
3546 } 3561 }
3547 }; 3562 };
3563
3564 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3565 const HeapRegion* hr,
3566 const VerifyOption vo) const {
3567 switch (vo) {
3568 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3569 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3570 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
3571 default: ShouldNotReachHere();
3572 }
3573 return false; // keep some compilers happy
3574 }
3575
3576 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3577 const VerifyOption vo) const {
3578 switch (vo) {
3579 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
3580 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
3581 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
3582 default: ShouldNotReachHere();
3583 }
3584 return false; // keep some compilers happy
3585 }
3548 3586
3549 void G1CollectedHeap::print_on(outputStream* st) const { 3587 void G1CollectedHeap::print_on(outputStream* st) const {
3550 st->print(" %-20s", "garbage-first heap"); 3588 st->print(" %-20s", "garbage-first heap");
3551 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", 3589 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3552 capacity()/K, used_unlocked()/K); 3590 capacity()/K, used_unlocked()/K);
3595 } 3633 }
3596 _cmThread->print_on(st); 3634 _cmThread->print_on(st);
3597 st->cr(); 3635 st->cr();
3598 _cm->print_worker_threads_on(st); 3636 _cm->print_worker_threads_on(st);
3599 _cg1r->print_worker_threads_on(st); 3637 _cg1r->print_worker_threads_on(st);
3638 if (G1StringDedup::is_enabled()) {
3639 G1StringDedup::print_worker_threads_on(st);
3640 }
3600 } 3641 }
3601 3642
3602 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { 3643 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3603 if (G1CollectedHeap::use_parallel_gc_threads()) { 3644 if (G1CollectedHeap::use_parallel_gc_threads()) {
3604 workers()->threads_do(tc); 3645 workers()->threads_do(tc);
3605 } 3646 }
3606 tc->do_thread(_cmThread); 3647 tc->do_thread(_cmThread);
3607 _cg1r->threads_do(tc); 3648 _cg1r->threads_do(tc);
3649 if (G1StringDedup::is_enabled()) {
3650 G1StringDedup::threads_do(tc);
3651 }
3608 } 3652 }
3609 3653
3610 void G1CollectedHeap::print_tracing_info() const { 3654 void G1CollectedHeap::print_tracing_info() const {
3611 // We'll overload this to mean "trace GC pause statistics." 3655 // We'll overload this to mean "trace GC pause statistics."
3612 if (TraceGen0Time || TraceGen1Time) { 3656 if (TraceGen0Time || TraceGen1Time) {
3650 } 3694 }
3651 3695
3652 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) { 3696 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3653 gclog_or_tty->cr(); 3697 gclog_or_tty->cr();
3654 gclog_or_tty->print_cr("========================================"); 3698 gclog_or_tty->print_cr("========================================");
3655 gclog_or_tty->print_cr(msg); 3699 gclog_or_tty->print_cr("%s", msg);
3656 gclog_or_tty->cr(); 3700 gclog_or_tty->cr();
3657 } 3701 }
3658 3702
3659 ~PrintRSetsClosure() { 3703 ~PrintRSetsClosure() {
3660 gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum); 3704 gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
3682 3726
3683 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { 3727 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3684 // always_do_update_barrier = false; 3728 // always_do_update_barrier = false;
3685 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); 3729 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3686 // Fill TLAB's and such 3730 // Fill TLAB's and such
3731 accumulate_statistics_all_tlabs();
3687 ensure_parsability(true); 3732 ensure_parsability(true);
3688 3733
3689 if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) && 3734 if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3690 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { 3735 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3691 g1_rem_set()->print_periodic_summary_info("Before GC RS summary"); 3736 g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3705 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" 3750 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3706 // is set. 3751 // is set.
3707 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), 3752 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3708 "derived pointer present")); 3753 "derived pointer present"));
3709 // always_do_update_barrier = true; 3754 // always_do_update_barrier = true;
3755
3756 resize_all_tlabs();
3710 3757
3711 // We have just completed a GC. Update the soft reference 3758 // We have just completed a GC. Update the soft reference
3712 // policy with the new heap occupancy 3759 // policy with the new heap occupancy
3713 Universe::update_heap_info_at_gc(); 3760 Universe::update_heap_info_at_gc();
3714 } 3761 }
3906 ResourceMark rm; 3953 ResourceMark rm;
3907 3954
3908 print_heap_before_gc(); 3955 print_heap_before_gc();
3909 trace_heap_before_gc(_gc_tracer_stw); 3956 trace_heap_before_gc(_gc_tracer_stw);
3910 3957
3911 HRSPhaseSetter x(HRSPhaseEvacuation);
3912 verify_region_sets_optional(); 3958 verify_region_sets_optional();
3913 verify_dirty_young_regions(); 3959 verify_dirty_young_regions();
3914 3960
3915 // This call will decide whether this pause is an initial-mark 3961 // This call will decide whether this pause is an initial-mark
3916 // pause. If it is, during_initial_mark_pause() will return true 3962 // pause. If it is, during_initial_mark_pause() will return true
4405 } 4451 }
4406 4452
4407 void G1CollectedHeap::remove_self_forwarding_pointers() { 4453 void G1CollectedHeap::remove_self_forwarding_pointers() {
4408 assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity"); 4454 assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4409 4455
4456 double remove_self_forwards_start = os::elapsedTime();
4457
4410 G1ParRemoveSelfForwardPtrsTask rsfp_task(this); 4458 G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4411 4459
4412 if (G1CollectedHeap::use_parallel_gc_threads()) { 4460 if (G1CollectedHeap::use_parallel_gc_threads()) {
4413 set_par_threads(); 4461 set_par_threads();
4414 workers()->run_task(&rsfp_task); 4462 workers()->run_task(&rsfp_task);
4432 markOop m = _preserved_marks_of_objs.pop(); 4480 markOop m = _preserved_marks_of_objs.pop();
4433 obj->set_mark(m); 4481 obj->set_mark(m);
4434 } 4482 }
4435 _objs_with_preserved_marks.clear(true); 4483 _objs_with_preserved_marks.clear(true);
4436 _preserved_marks_of_objs.clear(true); 4484 _preserved_marks_of_objs.clear(true);
4485
4486 g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
4437 } 4487 }
4438 4488
4439 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { 4489 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4440 _evac_failure_scan_stack->push(obj); 4490 _evac_failure_scan_stack->push(obj);
4441 } 4491 }
4553 } 4603 }
4554 4604
4555 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) : 4605 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4556 ParGCAllocBuffer(gclab_word_size), _retired(false) { } 4606 ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4557 4607
4558 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num) 4608 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
4559 : _g1h(g1h), 4609 : _g1h(g1h),
4560 _refs(g1h->task_queue(queue_num)), 4610 _refs(g1h->task_queue(queue_num)),
4561 _dcq(&g1h->dirty_card_queue_set()), 4611 _dcq(&g1h->dirty_card_queue_set()),
4562 _ct_bs(g1h->g1_barrier_set()), 4612 _ct_bs(g1h->g1_barrier_set()),
4563 _g1_rem(g1h->g1_rem_set()), 4613 _g1_rem(g1h->g1_rem_set()),
4564 _hash_seed(17), _queue_num(queue_num), 4614 _hash_seed(17), _queue_num(queue_num),
4565 _term_attempts(0), 4615 _term_attempts(0),
4566 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), 4616 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4567 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), 4617 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4568 _age_table(false), 4618 _age_table(false), _scanner(g1h, this, rp),
4569 _strong_roots_time(0), _term_time(0), 4619 _strong_roots_time(0), _term_time(0),
4570 _alloc_buffer_waste(0), _undo_waste(0) { 4620 _alloc_buffer_waste(0), _undo_waste(0) {
4571 // we allocate G1YoungSurvRateNumRegions plus one entries, since 4621 // we allocate G1YoungSurvRateNumRegions plus one entries, since
4572 // we "sacrifice" entry 0 to keep track of surviving bytes for 4622 // we "sacrifice" entry 0 to keep track of surviving bytes for
4573 // non-young regions (where the age is -1) 4623 // non-young regions (where the age is -1)
4653 } 4703 }
4654 } 4704 }
4655 #endif // ASSERT 4705 #endif // ASSERT
4656 4706
4657 void G1ParScanThreadState::trim_queue() { 4707 void G1ParScanThreadState::trim_queue() {
4658 assert(_evac_cl != NULL, "not set");
4659 assert(_evac_failure_cl != NULL, "not set"); 4708 assert(_evac_failure_cl != NULL, "not set");
4660 assert(_partial_scan_cl != NULL, "not set");
4661 4709
4662 StarTask ref; 4710 StarTask ref;
4663 do { 4711 do {
4664 // Drain the overflow stack first, so other threads can steal. 4712 // Drain the overflow stack first, so other threads can steal.
4665 while (refs()->pop_overflow(ref)) { 4713 while (refs()->pop_overflow(ref)) {
4672 } while (!refs()->is_empty()); 4720 } while (!refs()->is_empty());
4673 } 4721 }
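
trim_queue() drains the private overflow stack before the shared task queue so that stealable entries stay visible to other workers as long as possible. A generic sketch of that drain order (illustrative container types, not HotSpot's lock-free taskqueue):

#include <deque>

template <typename Task, typename Fn>
void trim_queue(std::deque<Task>& overflow, std::deque<Task>& stealable, Fn process) {
  do {
    // Drain the private overflow stack first, so entries left on the
    // stealable queue remain available to other workers.
    while (!overflow.empty()) {
      Task t = overflow.back();
      overflow.pop_back();
      process(t);
    }
    while (!stealable.empty()) {
      Task t = stealable.front();
      stealable.pop_front();
      process(t);
    }
  } while (!overflow.empty() || !stealable.empty());   // processing may push new tasks
}
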
4674 4722
4675 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, 4723 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4676 G1ParScanThreadState* par_scan_state) : 4724 G1ParScanThreadState* par_scan_state) :
4677 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), 4725 _g1(g1), _par_scan_state(par_scan_state),
4678 _par_scan_state(par_scan_state), 4726 _worker_id(par_scan_state->queue_num()) { }
4679 _worker_id(par_scan_state->queue_num()), 4727
4680 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()), 4728 void G1ParCopyHelper::mark_object(oop obj) {
4681 _mark_in_progress(_g1->mark_in_progress()) { }
4682
4683 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4684 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
4685 #ifdef ASSERT 4729 #ifdef ASSERT
4686 HeapRegion* hr = _g1->heap_region_containing(obj); 4730 HeapRegion* hr = _g1->heap_region_containing(obj);
4687 assert(hr != NULL, "sanity"); 4731 assert(hr != NULL, "sanity");
4688 assert(!hr->in_collection_set(), "should not mark objects in the CSet"); 4732 assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4689 #endif // ASSERT 4733 #endif // ASSERT
4690 4734
4691 // We know that the object is not moving so it's safe to read its size. 4735 // We know that the object is not moving so it's safe to read its size.
4692 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id); 4736 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4693 } 4737 }
4694 4738
4695 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> 4739 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4696 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4697 ::mark_forwarded_object(oop from_obj, oop to_obj) {
4698 #ifdef ASSERT 4740 #ifdef ASSERT
4699 assert(from_obj->is_forwarded(), "from obj should be forwarded"); 4741 assert(from_obj->is_forwarded(), "from obj should be forwarded");
4700 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee"); 4742 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4701 assert(from_obj != to_obj, "should not be self-forwarded"); 4743 assert(from_obj != to_obj, "should not be self-forwarded");
4702 4744
4714 // well-formed. So we have to read its size from its from-space 4756 // well-formed. So we have to read its size from its from-space
4715 // image which we know should not be changing. 4757 // image which we know should not be changing.
4716 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id); 4758 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4717 } 4759 }
4718 4760
4719 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> 4761 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
4720 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4721 ::copy_to_survivor_space(oop old) {
4722 size_t word_sz = old->size(); 4762 size_t word_sz = old->size();
4723 HeapRegion* from_region = _g1->heap_region_containing_raw(old); 4763 HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
4724 // +1 to make the -1 indexes valid... 4764 // +1 to make the -1 indexes valid...
4725 int young_index = from_region->young_index_in_cset()+1; 4765 int young_index = from_region->young_index_in_cset()+1;
4726 assert( (from_region->is_young() && young_index > 0) || 4766 assert( (from_region->is_young() && young_index > 0) ||
4727 (!from_region->is_young() && young_index == 0), "invariant" ); 4767 (!from_region->is_young() && young_index == 0), "invariant" );
4728 G1CollectorPolicy* g1p = _g1->g1_policy(); 4768 G1CollectorPolicy* g1p = _g1h->g1_policy();
4729 markOop m = old->mark(); 4769 markOop m = old->mark();
4730 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() 4770 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4731 : m->age(); 4771 : m->age();
4732 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, 4772 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4733 word_sz); 4773 word_sz);
4734 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); 4774 HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
4735 #ifndef PRODUCT 4775 #ifndef PRODUCT
4736 // Should this evacuation fail? 4776 // Should this evacuation fail?
4737 if (_g1->evacuation_should_fail()) { 4777 if (_g1h->evacuation_should_fail()) {
4738 if (obj_ptr != NULL) { 4778 if (obj_ptr != NULL) {
4739 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); 4779 undo_allocation(alloc_purpose, obj_ptr, word_sz);
4740 obj_ptr = NULL; 4780 obj_ptr = NULL;
4741 } 4781 }
4742 } 4782 }
4743 #endif // !PRODUCT 4783 #endif // !PRODUCT
4744 4784
4745 if (obj_ptr == NULL) { 4785 if (obj_ptr == NULL) {
4746 // This will either forward-to-self, or detect that someone else has 4786 // This will either forward-to-self, or detect that someone else has
4747 // installed a forwarding pointer. 4787 // installed a forwarding pointer.
4748 return _g1->handle_evacuation_failure_par(_par_scan_state, old); 4788 return _g1h->handle_evacuation_failure_par(this, old);
4749 } 4789 }
4750 4790
4751 oop obj = oop(obj_ptr); 4791 oop obj = oop(obj_ptr);
4752 4792
4753 // We're going to allocate linearly, so might as well prefetch ahead. 4793 // We're going to allocate linearly, so might as well prefetch ahead.
4754 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); 4794 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4755 4795
4756 oop forward_ptr = old->forward_to_atomic(obj); 4796 oop forward_ptr = old->forward_to_atomic(obj);
4757 if (forward_ptr == NULL) { 4797 if (forward_ptr == NULL) {
4758 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); 4798 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4799
4800 // alloc_purpose is just a hint to allocate() above, recheck the type of region
4801 // we actually allocated from and update alloc_purpose accordingly
4802 HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
4803 alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
4804
4759 if (g1p->track_object_age(alloc_purpose)) { 4805 if (g1p->track_object_age(alloc_purpose)) {
4760 // We could simply do obj->incr_age(). However, this causes a 4806 // We could simply do obj->incr_age(). However, this causes a
4761 // performance issue. obj->incr_age() will first check whether 4807 // performance issue. obj->incr_age() will first check whether
4762 // the object has a displaced mark by checking its mark word; 4808 // the object has a displaced mark by checking its mark word;
4763 // getting the mark word from the new location of the object 4809 // getting the mark word from the new location of the object
4776 obj->incr_age(); 4822 obj->incr_age();
4777 } else { 4823 } else {
4778 m = m->incr_age(); 4824 m = m->incr_age();
4779 obj->set_mark(m); 4825 obj->set_mark(m);
4780 } 4826 }
4781 _par_scan_state->age_table()->add(obj, word_sz); 4827 age_table()->add(obj, word_sz);
4782 } else { 4828 } else {
4783 obj->set_mark(m); 4829 obj->set_mark(m);
4784 } 4830 }
4785 4831
4786 size_t* surv_young_words = _par_scan_state->surviving_young_words(); 4832 if (G1StringDedup::is_enabled()) {
4833 G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
4834 to_region->is_young(),
4835 queue_num(),
4836 obj);
4837 }
4838
4839 size_t* surv_young_words = surviving_young_words();
4787 surv_young_words[young_index] += word_sz; 4840 surv_young_words[young_index] += word_sz;
4788 4841
4789 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { 4842 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4790 // We keep track of the next start index in the length field of 4843 // We keep track of the next start index in the length field of
4791 // the to-space object. The actual length can be found in the 4844 // the to-space object. The actual length can be found in the
4792 // length field of the from-space object. 4845 // length field of the from-space object.
4793 arrayOop(obj)->set_length(0); 4846 arrayOop(obj)->set_length(0);
4794 oop* old_p = set_partial_array_mask(old); 4847 oop* old_p = set_partial_array_mask(old);
4795 _par_scan_state->push_on_queue(old_p); 4848 push_on_queue(old_p);
4796 } else { 4849 } else {
4797 // No point in using the slower heap_region_containing() method, 4850 // No point in using the slower heap_region_containing() method,
4798 // given that we know obj is in the heap. 4851 // given that we know obj is in the heap.
4799 _scanner.set_region(_g1->heap_region_containing_raw(obj)); 4852 _scanner.set_region(_g1h->heap_region_containing_raw(obj));
4800 obj->oop_iterate_backwards(&_scanner); 4853 obj->oop_iterate_backwards(&_scanner);
4801 } 4854 }
4802 } else { 4855 } else {
4803 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); 4856 undo_allocation(alloc_purpose, obj_ptr, word_sz);
4804 obj = forward_ptr; 4857 obj = forward_ptr;
4805 } 4858 }
4806 return obj; 4859 return obj;
4807 } 4860 }
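
copy_to_survivor_space(), now a G1ParScanThreadState member, copies the object speculatively and then claims it with forward_to_atomic(); the loser of the race undoes its allocation and uses the winner's copy. A minimal model of that claim-by-CAS idiom (illustrative types only; the real code additionally handles ages, string-dedup enqueueing, and the large-array chunking shown below):

#include <atomic>
#include <cstddef>
#include <cstring>

// Minimal model: copy first, then claim by CAS on the forwarding pointer.
struct ObjHeader {
  std::atomic<void*> forwardee{nullptr};
  size_t payload_bytes;
};

void* copy_and_forward(ObjHeader* old_hdr, const void* old_payload, void* new_block) {
  // Speculative copy into space this worker allocated (a per-thread PLAB in the real code).
  std::memcpy(new_block, old_payload, old_hdr->payload_bytes);
  void* expected = nullptr;
  if (old_hdr->forwardee.compare_exchange_strong(expected, new_block)) {
    return new_block;        // this worker installed the forwarding pointer
  }
  // Another worker won the race: undo our allocation (not shown) and use its copy.
  return expected;
}
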
4808 4861
4811 if (_g1->heap_region_containing_raw(new_obj)->is_young()) { 4864 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4812 _scanned_klass->record_modified_oops(); 4865 _scanned_klass->record_modified_oops();
4813 } 4866 }
4814 } 4867 }
4815 4868
4816 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> 4869 template <G1Barrier barrier, bool do_mark_object>
4817 template <class T> 4870 template <class T>
4818 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> 4871 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4819 ::do_oop_work(T* p) { 4872 T heap_oop = oopDesc::load_heap_oop(p);
4820 oop obj = oopDesc::load_decode_heap_oop(p); 4873
4821 assert(barrier != G1BarrierRS || obj != NULL, 4874 if (oopDesc::is_null(heap_oop)) {
4822 "Precondition: G1BarrierRS implies obj is non-NULL"); 4875 return;
4876 }
4877
4878 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4823 4879
4824 assert(_worker_id == _par_scan_state->queue_num(), "sanity"); 4880 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4825 4881
4826 // here the null check is implicit in the cset_fast_test() test
4827 if (_g1->in_cset_fast_test(obj)) { 4882 if (_g1->in_cset_fast_test(obj)) {
4828 oop forwardee; 4883 oop forwardee;
4829 if (obj->is_forwarded()) { 4884 if (obj->is_forwarded()) {
4830 forwardee = obj->forwardee(); 4885 forwardee = obj->forwardee();
4831 } else { 4886 } else {
4832 forwardee = copy_to_survivor_space(obj); 4887 forwardee = _par_scan_state->copy_to_survivor_space(obj);
4833 } 4888 }
4834 assert(forwardee != NULL, "forwardee should not be NULL"); 4889 assert(forwardee != NULL, "forwardee should not be NULL");
4835 oopDesc::encode_store_heap_oop(p, forwardee); 4890 oopDesc::encode_store_heap_oop(p, forwardee);
4836 if (do_mark_object && forwardee != obj) { 4891 if (do_mark_object && forwardee != obj) {
4837 // If the object is self-forwarded we don't need to explicitly 4892 // If the object is self-forwarded we don't need to explicitly
4838 // mark it, the evacuation failure protocol will do so. 4893 // mark it, the evacuation failure protocol will do so.
4839 mark_forwarded_object(obj, forwardee); 4894 mark_forwarded_object(obj, forwardee);
4840 } 4895 }
4841 4896
4842 // When scanning the RS, we only care about objs in CS. 4897 if (barrier == G1BarrierKlass) {
4843 if (barrier == G1BarrierRS) {
4844 _par_scan_state->update_rs(_from, p, _worker_id);
4845 } else if (barrier == G1BarrierKlass) {
4846 do_klass_barrier(p, forwardee); 4898 do_klass_barrier(p, forwardee);
4847 } 4899 }
4848 } else { 4900 } else {
4849 // The object is not in collection set. If we're a root scanning 4901 // The object is not in collection set. If we're a root scanning
4850 // closure during an initial mark pause (i.e. do_mark_object will 4902 // closure during an initial mark pause (i.e. do_mark_object will
4851 // be true) then attempt to mark the object. 4903 // be true) then attempt to mark the object.
4852 if (do_mark_object && _g1->is_in_g1_reserved(obj)) { 4904 if (do_mark_object) {
4853 mark_object(obj); 4905 mark_object(obj);
4854 } 4906 }
4855 } 4907 }
4856 4908
4857 if (barrier == G1BarrierEvac && obj != NULL) { 4909 if (barrier == G1BarrierEvac) {
4858 _par_scan_state->update_rs(_from, p, _worker_id); 4910 _par_scan_state->update_rs(_from, p, _worker_id);
4859 } 4911 }
4860 4912 }
4861 if (do_gen_barrier && obj != NULL) { 4913
4862 par_do_barrier(p); 4914 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
4863 } 4915 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4864 }
4865
4866 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
4867 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4868
4869 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4870 assert(has_partial_array_mask(p), "invariant");
4871 oop from_obj = clear_partial_array_mask(p);
4872
4873 assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
4874 assert(from_obj->is_objArray(), "must be obj array");
4875 objArrayOop from_obj_array = objArrayOop(from_obj);
4876 // The from-space object contains the real length.
4877 int length = from_obj_array->length();
4878
4879 assert(from_obj->is_forwarded(), "must be forwarded");
4880 oop to_obj = from_obj->forwardee();
4881 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
4882 objArrayOop to_obj_array = objArrayOop(to_obj);
4883 // We keep track of the next start index in the length field of the
4884 // to-space object.
4885 int next_index = to_obj_array->length();
4886 assert(0 <= next_index && next_index < length,
4887 err_msg("invariant, next index: %d, length: %d", next_index, length));
4888
4889 int start = next_index;
4890 int end = length;
4891 int remainder = end - start;
4892 // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
4893 if (remainder > 2 * ParGCArrayScanChunk) {
4894 end = start + ParGCArrayScanChunk;
4895 to_obj_array->set_length(end);
4896 // Push the remainder before we process the range in case another
4897 // worker has run out of things to do and can steal it.
4898 oop* from_obj_p = set_partial_array_mask(from_obj);
4899 _par_scan_state->push_on_queue(from_obj_p);
4900 } else {
4901 assert(length == end, "sanity");
4902 // We'll process the final range for this object. Restore the length
4903 // so that the heap remains parsable in case of evacuation failure.
4904 to_obj_array->set_length(end);
4905 }
4906 _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
4907 // Process indexes [start,end). It will also process the header
4908 // along with the first chunk (i.e., the chunk with start == 0).
4909 // Note that at this point the length field of to_obj_array is not
4910 // correct given that we are using it to keep track of the next
4911 // start index. oop_iterate_range() (thankfully!) ignores the length
4912 // field and only relies on the start / end parameters. It does
4913 // however return the size of the object which will be incorrect. So
4914 // we have to ignore it even if we wanted to use it.
4915 to_obj_array->oop_iterate_range(&_scanner, start, end);
4916 }
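
The removed G1ParScanPartialArrayClosure::do_oop_nv above documents how large object arrays are scanned in chunks: the to-space copy's length field is reused as a cursor, and the remainder is pushed back before each chunk is processed so other workers can steal it. A simplified sketch of that scheme (illustrative, not the HotSpot code):

#include <deque>

const int ChunkSize = 64;          // stands in for ParGCArrayScanChunk

struct ArrayTask {
  int* to_space_length;            // cursor kept in the to-space copy's length field
  int  real_length;                // the from-space copy keeps the true length
};

// Process one chunk of the array; push the remainder back so it can be stolen.
void scan_one_chunk(ArrayTask t, std::deque<ArrayTask>& queue,
                    void (*process_range)(int start, int end)) {
  int start = *t.to_space_length;
  int end   = t.real_length;
  if (end - start > 2 * ChunkSize) {
    end = start + ChunkSize;
    *t.to_space_length = end;      // advance the cursor
    queue.push_back(t);            // push the remainder before scanning this chunk
  } else {
    *t.to_space_length = end;      // final chunk: length is restored/settled
  }
  process_range(start, end);       // scan references in [start, end)
}
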
4917 4916
4918 class G1ParEvacuateFollowersClosure : public VoidClosure { 4917 class G1ParEvacuateFollowersClosure : public VoidClosure {
4919 protected: 4918 protected:
4920 G1CollectedHeap* _g1h; 4919 G1CollectedHeap* _g1h;
4921 G1ParScanThreadState* _par_scan_state; 4920 G1ParScanThreadState* _par_scan_state;
5053 ResourceMark rm; 5052 ResourceMark rm;
5054 HandleMark hm; 5053 HandleMark hm;
5055 5054
5056 ReferenceProcessor* rp = _g1h->ref_processor_stw(); 5055 ReferenceProcessor* rp = _g1h->ref_processor_stw();
5057 5056
5058 G1ParScanThreadState pss(_g1h, worker_id); 5057 G1ParScanThreadState pss(_g1h, worker_id, rp);
5059 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp);
5060 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp); 5058 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
5061 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp); 5059
5062
5063 pss.set_evac_closure(&scan_evac_cl);
5064 pss.set_evac_failure_closure(&evac_failure_cl); 5060 pss.set_evac_failure_closure(&evac_failure_cl);
5065 pss.set_partial_scan_closure(&partial_scan_cl);
5066 5061
5067 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp); 5062 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
5068 G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp); 5063 G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp);
5069 5064
5070 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp); 5065 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
5138 g1_process_strong_roots(bool is_scavenging, 5133 g1_process_strong_roots(bool is_scavenging,
5139 ScanningOption so, 5134 ScanningOption so,
5140 OopClosure* scan_non_heap_roots, 5135 OopClosure* scan_non_heap_roots,
5141 OopsInHeapRegionClosure* scan_rs, 5136 OopsInHeapRegionClosure* scan_rs,
5142 G1KlassScanClosure* scan_klasses, 5137 G1KlassScanClosure* scan_klasses,
5143 int worker_i) { 5138 uint worker_i) {
5144 5139
5145 // First scan the strong roots 5140 // First scan the strong roots
5146 double ext_roots_start = os::elapsedTime(); 5141 double ext_roots_start = os::elapsedTime();
5147 double closure_app_time_sec = 0.0; 5142 double closure_app_time_sec = 0.0;
5148 5143
5218 5213
5219 void 5214 void
5220 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) { 5215 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
5221 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); 5216 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
5222 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs); 5217 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
5218 }
5219
5220 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
5221 private:
5222 BoolObjectClosure* _is_alive;
5223 int _initial_string_table_size;
5224 int _initial_symbol_table_size;
5225
5226 bool _process_strings;
5227 int _strings_processed;
5228 int _strings_removed;
5229
5230 bool _process_symbols;
5231 int _symbols_processed;
5232 int _symbols_removed;
5233
5234 bool _do_in_parallel;
5235 public:
5236 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
5237 AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
5238 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
5239 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
5240 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
5241
5242 _initial_string_table_size = StringTable::the_table()->table_size();
5243 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
5244 if (process_strings) {
5245 StringTable::clear_parallel_claimed_index();
5246 }
5247 if (process_symbols) {
5248 SymbolTable::clear_parallel_claimed_index();
5249 }
5250 }
5251
5252 ~G1StringSymbolTableUnlinkTask() {
5253 guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
5254 err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
5255 StringTable::parallel_claimed_index(), _initial_string_table_size));
5256 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
5257 err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
5258 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
5259 }
5260
5261 void work(uint worker_id) {
5262 if (_do_in_parallel) {
5263 int strings_processed = 0;
5264 int strings_removed = 0;
5265 int symbols_processed = 0;
5266 int symbols_removed = 0;
5267 if (_process_strings) {
5268 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
5269 Atomic::add(strings_processed, &_strings_processed);
5270 Atomic::add(strings_removed, &_strings_removed);
5271 }
5272 if (_process_symbols) {
5273 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
5274 Atomic::add(symbols_processed, &_symbols_processed);
5275 Atomic::add(symbols_removed, &_symbols_removed);
5276 }
5277 } else {
5278 if (_process_strings) {
5279 StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
5280 }
5281 if (_process_symbols) {
5282 SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
5283 }
5284 }
5285 }
5286
5287 size_t strings_processed() const { return (size_t)_strings_processed; }
5288 size_t strings_removed() const { return (size_t)_strings_removed; }
5289
5290 size_t symbols_processed() const { return (size_t)_symbols_processed; }
5291 size_t symbols_removed() const { return (size_t)_symbols_removed; }
5292 };
5293
5294 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5295 bool process_strings, bool process_symbols) {
5296 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5297 _g1h->workers()->active_workers() : 1);
5298
5299 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5300 if (G1CollectedHeap::use_parallel_gc_threads()) {
5301 set_par_threads(n_workers);
5302 workers()->run_task(&g1_unlink_task);
5303 set_par_threads(0);
5304 } else {
5305 g1_unlink_task.work(0);
5306 }
5307 if (G1TraceStringSymbolTableScrubbing) {
5308 gclog_or_tty->print_cr("Cleaned string and symbol table, "
5309 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
5310 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
5311 g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
5312 g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
5313 }
5314
5315 if (G1StringDedup::is_enabled()) {
5316 G1StringDedup::unlink(is_alive);
5317 }
5318 }
5319
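G1StringSymbolTableUnlinkTask leans on the tables exposing a parallel claimed index: each worker atomically claims the next slice of buckets, unlinks dead entries in its slice, and the task's destructor checks that the claim counter swept past the initial table size. A toy version of that claiming pattern is sketched below; the chunk size, bucket representation and function names are assumptions, not the StringTable/SymbolTable API.

#include <algorithm>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static const int kClaimChunk = 32;              // buckets claimed per step (assumed)
static std::atomic<int> g_claimed_index(0);     // the shared "parallel claimed index"
static std::atomic<int> g_removed(0);

// Toy "table": each bucket just counts how many dead entries it holds.
static std::vector<int> g_buckets(1000, 1);

void possibly_parallel_unlink() {
  for (;;) {
    int start = g_claimed_index.fetch_add(kClaimChunk);   // claim the next slice
    if (start >= static_cast<int>(g_buckets.size())) return;
    int end = std::min<int>(start + kClaimChunk, static_cast<int>(g_buckets.size()));
    int removed = 0;
    for (int i = start; i < end; i++) {                   // "unlink" dead entries
      removed += g_buckets[i];
      g_buckets[i] = 0;
    }
    g_removed.fetch_add(removed);
  }
}

int main() {
  std::thread workers[4];
  for (int i = 0; i < 4; i++) workers[i] = std::thread(possibly_parallel_unlink);
  for (int i = 0; i < 4; i++) workers[i].join();
  // Mirrors the destructor guarantee above: the claim index must have swept the table.
  std::printf("claimed up to %d, removed %d entries\n",
              g_claimed_index.load(), g_removed.load());
  return 0;
}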
5320 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
5321 public:
5322 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
5323 *card_ptr = CardTableModRefBS::dirty_card_val();
5324 return true;
5325 }
5326 };
5327
5328 void G1CollectedHeap::redirty_logged_cards() {
5329 guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
5330 double redirty_logged_cards_start = os::elapsedTime();
5331
5332 RedirtyLoggedCardTableEntryFastClosure redirty;
5333 dirty_card_queue_set().set_closure(&redirty);
5334 dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5335
5336 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5337 dcq.merge_bufferlists(&dirty_card_queue_set());
5338 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5339
5340 g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5223 } 5341 }
5224 5342
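redirty_logged_cards() applies a closure that simply writes the dirty value back into every card referenced from the completed update buffers, then merges those buffers into the shared dirty-card queue set so a later pass will rescan them. A compact sketch of that closure-over-card-pointers shape, with invented card values and buffer layout:

#include <cstdint>
#include <vector>

typedef int8_t jbyte_t;                            // stand-in for HotSpot's jbyte
static const jbyte_t kDirtyCard = 0;               // assumed dirty value, for illustration

struct CardClosure {
  // Return true to keep iterating, mirroring do_card_ptr's contract.
  virtual bool do_card_ptr(jbyte_t* card_ptr, unsigned worker_i) = 0;
  virtual ~CardClosure() {}
};

struct RedirtyClosure : public CardClosure {
  bool do_card_ptr(jbyte_t* card_ptr, unsigned /*worker_i*/) {
    *card_ptr = kDirtyCard;                        // simply mark the card dirty again
    return true;
  }
};

// Apply the closure to every logged card pointer in every completed buffer.
void apply_to_buffers(std::vector<std::vector<jbyte_t*> >& buffers, CardClosure& cl) {
  for (size_t b = 0; b < buffers.size(); b++) {
    for (size_t i = 0; i < buffers[b].size(); i++) {
      if (!cl.do_card_ptr(buffers[b][i], 0)) return;
    }
  }
}

int main() {
  std::vector<jbyte_t> card_table(16, 1);          // pretend 1 means clean here
  std::vector<std::vector<jbyte_t*> > buffers(1);
  buffers[0].push_back(&card_table[3]);
  buffers[0].push_back(&card_table[7]);
  RedirtyClosure redirty;
  apply_to_buffers(buffers, redirty);              // cards 3 and 7 become dirty again
  return 0;
}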
5225 // Weak Reference Processing support 5343 // Weak Reference Processing support
5226 5344
5227 // An always "is_alive" closure that is used to preserve referents. 5345 // An always "is_alive" closure that is used to preserve referents.
5310 // updating the RSet. 5428 // updating the RSet.
5311 5429
5312 if (_g1h->is_in_g1_reserved(p)) { 5430 if (_g1h->is_in_g1_reserved(p)) {
5313 _par_scan_state->push_on_queue(p); 5431 _par_scan_state->push_on_queue(p);
5314 } else { 5432 } else {
5315 assert(!ClassLoaderDataGraph::contains((address)p), 5433 assert(!Metaspace::contains((const void*)p),
5316 err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) " 5434 err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
5317 PTR_FORMAT, p)); 5435 PTR_FORMAT, p));
5318 _copy_non_heap_obj_cl->do_oop(p); 5436 _copy_non_heap_obj_cl->do_oop(p);
5319 } 5437 }
5320 } 5438 }
5400 ResourceMark rm; 5518 ResourceMark rm;
5401 HandleMark hm; 5519 HandleMark hm;
5402 5520
5403 G1STWIsAliveClosure is_alive(_g1h); 5521 G1STWIsAliveClosure is_alive(_g1h);
5404 5522
5405 G1ParScanThreadState pss(_g1h, worker_id); 5523 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5406
5407 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
5408 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL); 5524 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5409 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL); 5525
5410
5411 pss.set_evac_closure(&scan_evac_cl);
5412 pss.set_evac_failure_closure(&evac_failure_cl); 5526 pss.set_evac_failure_closure(&evac_failure_cl);
5413 pss.set_partial_scan_closure(&partial_scan_cl);
5414 5527
5415 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); 5528 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5416 G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL); 5529 G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
5417 5530
5418 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); 5531 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5512 5625
5513 void work(uint worker_id) { 5626 void work(uint worker_id) {
5514 ResourceMark rm; 5627 ResourceMark rm;
5515 HandleMark hm; 5628 HandleMark hm;
5516 5629
5517 G1ParScanThreadState pss(_g1h, worker_id); 5630 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5518 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
5519 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL); 5631 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5520 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL); 5632
5521
5522 pss.set_evac_closure(&scan_evac_cl);
5523 pss.set_evac_failure_closure(&evac_failure_cl); 5633 pss.set_evac_failure_closure(&evac_failure_cl);
5524 pss.set_partial_scan_closure(&partial_scan_cl);
5525 5634
5526 assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); 5635 assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5527 5636
5528 5637
5529 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); 5638 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5638 // of JNI refs is serial and performed serially by the current thread 5747 // of JNI refs is serial and performed serially by the current thread
5639 // rather than by a worker. The following PSS will be used for processing 5748 // rather than by a worker. The following PSS will be used for processing
5640 // JNI refs. 5749 // JNI refs.
5641 5750
5642 // Use only a single queue for this PSS. 5751 // Use only a single queue for this PSS.
5643 G1ParScanThreadState pss(this, 0); 5752 G1ParScanThreadState pss(this, 0, NULL);
5644 5753
5645 // We do not embed a reference processor in the copying/scanning 5754 // We do not embed a reference processor in the copying/scanning
5646 // closures while we're actually processing the discovered 5755 // closures while we're actually processing the discovered
5647 // reference objects. 5756 // reference objects.
5648 G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL);
5649 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL); 5757 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5650 G1ParScanPartialArrayClosure partial_scan_cl(this, &pss, NULL); 5758
5651
5652 pss.set_evac_closure(&scan_evac_cl);
5653 pss.set_evac_failure_closure(&evac_failure_cl); 5759 pss.set_evac_failure_closure(&evac_failure_cl);
5654 pss.set_partial_scan_closure(&partial_scan_cl);
5655 5760
5656 assert(pss.refs()->is_empty(), "pre-condition"); 5761 assert(pss.refs()->is_empty(), "pre-condition");
5657 5762
5658 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL); 5763 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
5659 G1ParScanMetadataClosure only_copy_metadata_cl(this, &pss, NULL); 5764 G1ParScanMetadataClosure only_copy_metadata_cl(this, &pss, NULL);
5831 // Weak root processing. 5936 // Weak root processing.
5832 { 5937 {
5833 G1STWIsAliveClosure is_alive(this); 5938 G1STWIsAliveClosure is_alive(this);
5834 G1KeepAliveClosure keep_alive(this); 5939 G1KeepAliveClosure keep_alive(this);
5835 JNIHandles::weak_oops_do(&is_alive, &keep_alive); 5940 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5941 if (G1StringDedup::is_enabled()) {
5942 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
5943 }
5836 } 5944 }
5837 5945
5838 release_gc_alloc_regions(n_workers, evacuation_info); 5946 release_gc_alloc_regions(n_workers, evacuation_info);
5839 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); 5947 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5840 5948
5847 // Migrate the strong code roots attached to each region in 5955 // Migrate the strong code roots attached to each region in
5848 // the collection set. Ideally we would like to do this 5956 // the collection set. Ideally we would like to do this
5849 // after we have finished the scanning/evacuation of the 5957 // after we have finished the scanning/evacuation of the
5850 // strong code roots for a particular heap region. 5958 // strong code roots for a particular heap region.
5851 migrate_strong_code_roots(); 5959 migrate_strong_code_roots();
5960
5961 purge_code_root_memory();
5852 5962
5853 if (g1_policy()->during_initial_mark_pause()) { 5963 if (g1_policy()->during_initial_mark_pause()) {
5854 // Reset the claim values set during marking the strong code roots 5964 // Reset the claim values set during marking the strong code roots
5855 reset_heap_region_claim_values(); 5965 reset_heap_region_claim_values();
5856 } 5966 }
5874 // cards). We need these updates logged to update any 5984 // cards). We need these updates logged to update any
5875 // RSets. 5985 // RSets.
5876 enqueue_discovered_references(n_workers); 5986 enqueue_discovered_references(n_workers);
5877 5987
5878 if (G1DeferredRSUpdate) { 5988 if (G1DeferredRSUpdate) {
5879 RedirtyLoggedCardTableEntryFastClosure redirty; 5989 redirty_logged_cards();
5880 dirty_card_queue_set().set_closure(&redirty);
5881 dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5882
5883 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5884 dcq.merge_bufferlists(&dirty_card_queue_set());
5885 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5886 } 5990 }
5887 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 5991 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5888 } 5992 }
5889 5993
5890 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
5891 size_t* pre_used,
5892 FreeRegionList* free_list,
5893 OldRegionSet* old_proxy_set,
5894 HumongousRegionSet* humongous_proxy_set,
5895 HRRSCleanupTask* hrrs_cleanup_task,
5896 bool par) {
5897 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
5898 if (hr->isHumongous()) {
5899 assert(hr->startsHumongous(), "we should only see starts humongous");
5900 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
5901 } else {
5902 _old_set.remove_with_proxy(hr, old_proxy_set);
5903 free_region(hr, pre_used, free_list, par);
5904 }
5905 } else {
5906 hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
5907 }
5908 }
5909
5910 void G1CollectedHeap::free_region(HeapRegion* hr, 5994 void G1CollectedHeap::free_region(HeapRegion* hr,
5911 size_t* pre_used,
5912 FreeRegionList* free_list, 5995 FreeRegionList* free_list,
5913 bool par) { 5996 bool par,
5997 bool locked) {
5914 assert(!hr->isHumongous(), "this is only for non-humongous regions"); 5998 assert(!hr->isHumongous(), "this is only for non-humongous regions");
5915 assert(!hr->is_empty(), "the region should not be empty"); 5999 assert(!hr->is_empty(), "the region should not be empty");
5916 assert(free_list != NULL, "pre-condition"); 6000 assert(free_list != NULL, "pre-condition");
5917 6001
5918 // Clear the card counts for this region. 6002 // Clear the card counts for this region.
5919 // Note: we only need to do this if the region is not young 6003 // Note: we only need to do this if the region is not young
5920 // (since we don't refine cards in young regions). 6004 // (since we don't refine cards in young regions).
5921 if (!hr->is_young()) { 6005 if (!hr->is_young()) {
5922 _cg1r->hot_card_cache()->reset_card_counts(hr); 6006 _cg1r->hot_card_cache()->reset_card_counts(hr);
5923 } 6007 }
5924 *pre_used += hr->used(); 6008 hr->hr_clear(par, true /* clear_space */, locked /* locked */);
5925 hr->hr_clear(par, true /* clear_space */); 6009 free_list->add_ordered(hr);
5926 free_list->add_as_head(hr);
5927 } 6010 }
5928 6011
5929 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, 6012 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5930 size_t* pre_used,
5931 FreeRegionList* free_list, 6013 FreeRegionList* free_list,
5932 HumongousRegionSet* humongous_proxy_set,
5933 bool par) { 6014 bool par) {
5934 assert(hr->startsHumongous(), "this is only for starts humongous regions"); 6015 assert(hr->startsHumongous(), "this is only for starts humongous regions");
5935 assert(free_list != NULL, "pre-condition"); 6016 assert(free_list != NULL, "pre-condition");
5936 assert(humongous_proxy_set != NULL, "pre-condition"); 6017
5937
5938 size_t hr_used = hr->used();
5939 size_t hr_capacity = hr->capacity(); 6018 size_t hr_capacity = hr->capacity();
5940 size_t hr_pre_used = 0;
5941 _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5942 // We need to read this before we make the region non-humongous, 6019 // We need to read this before we make the region non-humongous,
5943 // otherwise the information will be gone. 6020 // otherwise the information will be gone.
5944 uint last_index = hr->last_hc_index(); 6021 uint last_index = hr->last_hc_index();
5945 hr->set_notHumongous(); 6022 hr->set_notHumongous();
5946 free_region(hr, &hr_pre_used, free_list, par); 6023 free_region(hr, free_list, par);
5947 6024
5948 uint i = hr->hrs_index() + 1; 6025 uint i = hr->hrs_index() + 1;
5949 while (i < last_index) { 6026 while (i < last_index) {
5950 HeapRegion* curr_hr = region_at(i); 6027 HeapRegion* curr_hr = region_at(i);
5951 assert(curr_hr->continuesHumongous(), "invariant"); 6028 assert(curr_hr->continuesHumongous(), "invariant");
5952 curr_hr->set_notHumongous(); 6029 curr_hr->set_notHumongous();
5953 free_region(curr_hr, &hr_pre_used, free_list, par); 6030 free_region(curr_hr, free_list, par);
5954 i += 1; 6031 i += 1;
5955 } 6032 }
5956 assert(hr_pre_used == hr_used, 6033 }
5957 err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" " 6034
5958 "should be the same", hr_pre_used, hr_used)); 6035 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
5959 *pre_used += hr_pre_used; 6036 const HeapRegionSetCount& humongous_regions_removed) {
5960 } 6037 if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
5961 6038 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5962 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used, 6039 _old_set.bulk_remove(old_regions_removed);
5963 FreeRegionList* free_list, 6040 _humongous_set.bulk_remove(humongous_regions_removed);
5964 OldRegionSet* old_proxy_set, 6041 }
5965 HumongousRegionSet* humongous_proxy_set, 6042
5966 bool par) { 6043 }
5967 if (pre_used > 0) { 6044
5968 Mutex* lock = (par) ? ParGCRareEvent_lock : NULL; 6045 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
5969 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); 6046 assert(list != NULL, "list can't be null");
5970 assert(_summary_bytes_used >= pre_used, 6047 if (!list->is_empty()) {
5971 err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
5972 "should be >= pre_used: "SIZE_FORMAT,
5973 _summary_bytes_used, pre_used));
5974 _summary_bytes_used -= pre_used;
5975 }
5976 if (free_list != NULL && !free_list->is_empty()) {
5977 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); 6048 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5978 _free_list.add_as_head(free_list); 6049 _free_list.add_ordered(list);
5979 } 6050 }
5980 if (old_proxy_set != NULL && !old_proxy_set->is_empty()) { 6051 }
5981 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); 6052
5982 _old_set.update_from_proxy(old_proxy_set); 6053 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
5983 } 6054 assert(_summary_bytes_used >= bytes,
5984 if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) { 6055 err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
5985 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); 6056 _summary_bytes_used, bytes));
5986 _humongous_set.update_from_proxy(humongous_proxy_set); 6057 _summary_bytes_used -= bytes;
5987 }
5988 } 6058 }
5989 6059
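Taken together, remove_from_old_sets(), prepend_to_freelist() and decrement_summary_bytes() replace the old update_sets_after_freeing_regions(): callers now accumulate freed regions and byte counts in worker-local lists and counters, then merge them into the shared structures under a lock in one step. A generic sketch of that accumulate-locally, merge-once pattern (Region, Heap and merge_freed are stand-ins, not the G1 types):

#include <cstddef>
#include <list>
#include <mutex>

struct Region { size_t used_bytes; };

struct Heap {
  std::mutex free_list_lock;
  std::list<Region*> free_list;
  size_t summary_bytes_used;

  Heap() : summary_bytes_used(0) {}

  // Merge a worker-local list and byte count in one critical section,
  // instead of taking the lock once per freed region.
  void merge_freed(std::list<Region*>& local_free_list, size_t freed_bytes) {
    std::lock_guard<std::mutex> x(free_list_lock);
    free_list.splice(free_list.begin(), local_free_list);   // "prepend_to_freelist"
    summary_bytes_used -= freed_bytes;                       // "decrement_summary_bytes"
  }
};

int main() {
  Heap heap;
  heap.summary_bytes_used = 4096;
  Region a = {1024}, b = {2048};

  std::list<Region*> local;
  size_t freed = 0;
  local.push_back(&a); freed += a.used_bytes;                // free regions locally
  local.push_back(&b); freed += b.used_bytes;

  heap.merge_freed(local, freed);                            // single lock at the end
  return 0;
}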
5990 class G1ParCleanupCTTask : public AbstractGangTask { 6060 class G1ParCleanupCTTask : public AbstractGangTask {
5991 G1SATBCardTableModRefBS* _ct_bs; 6061 G1SATBCardTableModRefBS* _ct_bs;
5992 G1CollectedHeap* _g1h; 6062 G1CollectedHeap* _g1h;
6142 start_sec = os::elapsedTime(); 6212 start_sec = os::elapsedTime();
6143 non_young = true; 6213 non_young = true;
6144 } 6214 }
6145 } 6215 }
6146 6216
6147 rs_lengths += cur->rem_set()->occupied(); 6217 rs_lengths += cur->rem_set()->occupied_locked();
6148 6218
6149 HeapRegion* next = cur->next_in_collection_set(); 6219 HeapRegion* next = cur->next_in_collection_set();
6150 assert(cur->in_collection_set(), "bad CS"); 6220 assert(cur->in_collection_set(), "bad CS");
6151 cur->set_next_in_collection_set(NULL); 6221 cur->set_next_in_collection_set(NULL);
6152 cur->set_in_collection_set(false); 6222 cur->set_in_collection_set(false);
6175 if (!cur->evacuation_failed()) { 6245 if (!cur->evacuation_failed()) {
6176 MemRegion used_mr = cur->used_region(); 6246 MemRegion used_mr = cur->used_region();
6177 6247
6178 // And the region is empty. 6248 // And the region is empty.
6179 assert(!used_mr.is_empty(), "Should not have empty regions in a CS."); 6249 assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
6180 free_region(cur, &pre_used, &local_free_list, false /* par */); 6250 pre_used += cur->used();
6251 free_region(cur, &local_free_list, false /* par */, true /* locked */);
6181 } else { 6252 } else {
6182 cur->uninstall_surv_rate_group(); 6253 cur->uninstall_surv_rate_group();
6183 if (cur->is_young()) { 6254 if (cur->is_young()) {
6184 cur->set_young_index_in_cset(-1); 6255 cur->set_young_index_in_cset(-1);
6185 } 6256 }
6203 non_young_time_ms += elapsed_ms; 6274 non_young_time_ms += elapsed_ms;
6204 } else { 6275 } else {
6205 young_time_ms += elapsed_ms; 6276 young_time_ms += elapsed_ms;
6206 } 6277 }
6207 6278
6208 update_sets_after_freeing_regions(pre_used, &local_free_list, 6279 prepend_to_freelist(&local_free_list);
6209 NULL /* old_proxy_set */, 6280 decrement_summary_bytes(pre_used);
6210 NULL /* humongous_proxy_set */,
6211 false /* par */);
6212 policy->phase_times()->record_young_free_cset_time_ms(young_time_ms); 6281 policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
6213 policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms); 6282 policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
6214 } 6283 }
6215 6284
6216 // This routine is similar to the above but does not record 6285 // This routine is similar to the above but does not record
6318 return ret; 6387 return ret;
6319 } 6388 }
6320 6389
6321 class TearDownRegionSetsClosure : public HeapRegionClosure { 6390 class TearDownRegionSetsClosure : public HeapRegionClosure {
6322 private: 6391 private:
6323 OldRegionSet *_old_set; 6392 HeapRegionSet *_old_set;
6324 6393
6325 public: 6394 public:
6326 TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { } 6395 TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
6327 6396
6328 bool doHeapRegion(HeapRegion* r) { 6397 bool doHeapRegion(HeapRegion* r) {
6329 if (r->is_empty()) { 6398 if (r->is_empty()) {
6330 // We ignore empty regions, we'll empty the free list afterwards 6399 // We ignore empty regions, we'll empty the free list afterwards
6331 } else if (r->is_young()) { 6400 } else if (r->is_young()) {
6350 6419
6351 if (!free_list_only) { 6420 if (!free_list_only) {
6352 TearDownRegionSetsClosure cl(&_old_set); 6421 TearDownRegionSetsClosure cl(&_old_set);
6353 heap_region_iterate(&cl); 6422 heap_region_iterate(&cl);
6354 6423
6355 // Need to do this after the heap iteration to be able to 6424 // Note that emptying the _young_list is postponed and instead done as
6356 // recognize the young regions and ignore them during the iteration. 6425 // the first step when rebuilding the region sets again. The reason for
6357 _young_list->empty_list(); 6426 // this is that during a full GC string deduplication needs to know if
6427 // a collected region was young or old when the full GC was initiated.
6358 } 6428 }
6359 _free_list.remove_all(); 6429 _free_list.remove_all();
6360 } 6430 }
6361 6431
6362 class RebuildRegionSetsClosure : public HeapRegionClosure { 6432 class RebuildRegionSetsClosure : public HeapRegionClosure {
6363 private: 6433 private:
6364 bool _free_list_only; 6434 bool _free_list_only;
6365 OldRegionSet* _old_set; 6435 HeapRegionSet* _old_set;
6366 FreeRegionList* _free_list; 6436 FreeRegionList* _free_list;
6367 size_t _total_used; 6437 size_t _total_used;
6368 6438
6369 public: 6439 public:
6370 RebuildRegionSetsClosure(bool free_list_only, 6440 RebuildRegionSetsClosure(bool free_list_only,
6371 OldRegionSet* old_set, FreeRegionList* free_list) : 6441 HeapRegionSet* old_set, FreeRegionList* free_list) :
6372 _free_list_only(free_list_only), 6442 _free_list_only(free_list_only),
6373 _old_set(old_set), _free_list(free_list), _total_used(0) { 6443 _old_set(old_set), _free_list(free_list), _total_used(0) {
6374 assert(_free_list->is_empty(), "pre-condition"); 6444 assert(_free_list->is_empty(), "pre-condition");
6375 if (!free_list_only) { 6445 if (!free_list_only) {
6376 assert(_old_set->is_empty(), "pre-condition"); 6446 assert(_old_set->is_empty(), "pre-condition");
6405 } 6475 }
6406 }; 6476 };
6407 6477
6408 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) { 6478 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6409 assert_at_safepoint(true /* should_be_vm_thread */); 6479 assert_at_safepoint(true /* should_be_vm_thread */);
6480
6481 if (!free_list_only) {
6482 _young_list->empty_list();
6483 }
6410 6484
6411 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list); 6485 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
6412 heap_region_iterate(&cl); 6486 heap_region_iterate(&cl);
6413 6487
6414 if (!free_list_only) { 6488 if (!free_list_only) {
6441 assert(!force || g1_policy()->can_expand_young_list(), 6515 assert(!force || g1_policy()->can_expand_young_list(),
6442 "if force is true we should be able to expand the young list"); 6516 "if force is true we should be able to expand the young list");
6443 bool young_list_full = g1_policy()->is_young_list_full(); 6517 bool young_list_full = g1_policy()->is_young_list_full();
6444 if (force || !young_list_full) { 6518 if (force || !young_list_full) {
6445 HeapRegion* new_alloc_region = new_region(word_size, 6519 HeapRegion* new_alloc_region = new_region(word_size,
6520 false /* is_old */,
6446 false /* do_expand */); 6521 false /* do_expand */);
6447 if (new_alloc_region != NULL) { 6522 if (new_alloc_region != NULL) {
6448 set_region_short_lived_locked(new_alloc_region); 6523 set_region_short_lived_locked(new_alloc_region);
6449 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full); 6524 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6450 return new_alloc_region; 6525 return new_alloc_region;
6499 uint count, 6574 uint count,
6500 GCAllocPurpose ap) { 6575 GCAllocPurpose ap) {
6501 assert(FreeList_lock->owned_by_self(), "pre-condition"); 6576 assert(FreeList_lock->owned_by_self(), "pre-condition");
6502 6577
6503 if (count < g1_policy()->max_regions(ap)) { 6578 if (count < g1_policy()->max_regions(ap)) {
6579 bool survivor = (ap == GCAllocForSurvived);
6504 HeapRegion* new_alloc_region = new_region(word_size, 6580 HeapRegion* new_alloc_region = new_region(word_size,
6581 !survivor,
6505 true /* do_expand */); 6582 true /* do_expand */);
6506 if (new_alloc_region != NULL) { 6583 if (new_alloc_region != NULL) {
6507 // We really only need to do this for old regions given that we 6584 // We really only need to do this for old regions given that we
6508 // should never scan survivors. But it doesn't hurt to do it 6585 // should never scan survivors. But it doesn't hurt to do it
6509 // for survivors too. 6586 // for survivors too.
6510 new_alloc_region->set_saved_mark(); 6587 new_alloc_region->set_saved_mark();
6511 if (ap == GCAllocForSurvived) { 6588 if (survivor) {
6512 new_alloc_region->set_survivor(); 6589 new_alloc_region->set_survivor();
6513 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor); 6590 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6514 } else { 6591 } else {
6515 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old); 6592 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6516 } 6593 }
6563 } 6640 }
6564 // Heap region set verification 6641 // Heap region set verification
6565 6642
6566 class VerifyRegionListsClosure : public HeapRegionClosure { 6643 class VerifyRegionListsClosure : public HeapRegionClosure {
6567 private: 6644 private:
6568 FreeRegionList* _free_list; 6645 HeapRegionSet* _old_set;
6569 OldRegionSet* _old_set; 6646 HeapRegionSet* _humongous_set;
6570 HumongousRegionSet* _humongous_set; 6647 FreeRegionList* _free_list;
6571 uint _region_count;
6572 6648
6573 public: 6649 public:
6574 VerifyRegionListsClosure(OldRegionSet* old_set, 6650 HeapRegionSetCount _old_count;
6575 HumongousRegionSet* humongous_set, 6651 HeapRegionSetCount _humongous_count;
6652 HeapRegionSetCount _free_count;
6653
6654 VerifyRegionListsClosure(HeapRegionSet* old_set,
6655 HeapRegionSet* humongous_set,
6576 FreeRegionList* free_list) : 6656 FreeRegionList* free_list) :
6577 _old_set(old_set), _humongous_set(humongous_set), 6657 _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list),
6578 _free_list(free_list), _region_count(0) { } 6658 _old_count(), _humongous_count(), _free_count(){ }
6579
6580 uint region_count() { return _region_count; }
6581 6659
6582 bool doHeapRegion(HeapRegion* hr) { 6660 bool doHeapRegion(HeapRegion* hr) {
6583 _region_count += 1;
6584
6585 if (hr->continuesHumongous()) { 6661 if (hr->continuesHumongous()) {
6586 return false; 6662 return false;
6587 } 6663 }
6588 6664
6589 if (hr->is_young()) { 6665 if (hr->is_young()) {
6590 // TODO 6666 // TODO
6591 } else if (hr->startsHumongous()) { 6667 } else if (hr->startsHumongous()) {
6592 _humongous_set->verify_next_region(hr); 6668 assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num()));
6669 _humongous_count.increment(1u, hr->capacity());
6593 } else if (hr->is_empty()) { 6670 } else if (hr->is_empty()) {
6594 _free_list->verify_next_region(hr); 6671 assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num()));
6672 _free_count.increment(1u, hr->capacity());
6595 } else { 6673 } else {
6596 _old_set->verify_next_region(hr); 6674 assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num()));
6675 _old_count.increment(1u, hr->capacity());
6597 } 6676 }
6598 return false; 6677 return false;
6678 }
6679
6680 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) {
6681 guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
6682 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6683 old_set->total_capacity_bytes(), _old_count.capacity()));
6684
6685 guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
6686 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6687 humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
6688
6689 guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length()));
6690 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6691 free_list->total_capacity_bytes(), _free_count.capacity()));
6599 } 6692 }
6600 }; 6693 };
6601 6694
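The rewritten VerifyRegionListsClosure no longer steps through each set's internal list in lockstep; it checks every region's containing_set() pointer, tallies length and capacity per set, and verify_counts() then compares the tallies against the sets' own bookkeeping. A condensed sketch of that count-and-compare style of verification, using invented Region/SetInfo types rather than the HeapRegionSet API:

#include <cassert>
#include <cstddef>
#include <vector>

enum SetKind { kOld, kHumongous, kFree };

struct Region  { SetKind containing_set; size_t capacity; };
struct SetInfo { size_t length; size_t capacity; };   // what a set claims about itself

struct Counter {
  size_t length;
  size_t capacity;
  void increment(size_t cap) { length++; capacity += cap; }
};

// Walk every region, tally what is actually present per set, then compare
// the tallies against each set's own bookkeeping.
void verify_region_sets(const std::vector<Region>& regions, const SetInfo sets[3]) {
  Counter seen[3] = {};
  for (size_t i = 0; i < regions.size(); i++) {
    seen[regions[i].containing_set].increment(regions[i].capacity);
  }
  for (int k = 0; k < 3; k++) {
    assert(sets[k].length == seen[k].length && "set length mismatch");
    assert(sets[k].capacity == seen[k].capacity && "set capacity mismatch");
  }
}

int main() {
  std::vector<Region> regions;
  regions.push_back(Region{kOld, 1024});
  regions.push_back(Region{kFree, 1024});
  regions.push_back(Region{kFree, 1024});
  SetInfo sets[3] = { {1, 1024}, {0, 0}, {2, 2048} };   // old, humongous, free
  verify_region_sets(regions, sets);
  return 0;
}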
6602 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index, 6695 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6603 HeapWord* bottom) { 6696 HeapWord* bottom) {
6610 6703
6611 void G1CollectedHeap::verify_region_sets() { 6704 void G1CollectedHeap::verify_region_sets() {
6612 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 6705 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6613 6706
6614 // First, check the explicit lists. 6707 // First, check the explicit lists.
6615 _free_list.verify(); 6708 _free_list.verify_list();
6616 { 6709 {
6617 // Given that a concurrent operation might be adding regions to 6710 // Given that a concurrent operation might be adding regions to
6618 // the secondary free list we have to take the lock before 6711 // the secondary free list we have to take the lock before
6619 // verifying it. 6712 // verifying it.
6620 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 6713 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6621 _secondary_free_list.verify(); 6714 _secondary_free_list.verify_list();
6622 } 6715 }
6623 _old_set.verify();
6624 _humongous_set.verify();
6625 6716
6626 // If a concurrent region freeing operation is in progress it will 6717 // If a concurrent region freeing operation is in progress it will
6627 // be difficult to correctly attribute any free regions we come 6718 // be difficult to correctly attribute any free regions we come
6628 // across to the correct free list given that they might belong to 6719 // across to the correct free list given that they might belong to
6629 // one of several (free_list, secondary_free_list, any local lists, 6720 // one of several (free_list, secondary_free_list, any local lists,
6642 // attributed to the free_list. 6733 // attributed to the free_list.
6643 append_secondary_free_list_if_not_empty_with_lock(); 6734 append_secondary_free_list_if_not_empty_with_lock();
6644 6735
6645 // Finally, make sure that the region accounting in the lists is 6736 // Finally, make sure that the region accounting in the lists is
6646 // consistent with what we see in the heap. 6737 // consistent with what we see in the heap.
6647 _old_set.verify_start();
6648 _humongous_set.verify_start();
6649 _free_list.verify_start();
6650 6738
6651 VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list); 6739 VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
6652 heap_region_iterate(&cl); 6740 heap_region_iterate(&cl);
6653 6741 cl.verify_counts(&_old_set, &_humongous_set, &_free_list);
6654 _old_set.verify_end();
6655 _humongous_set.verify_end();
6656 _free_list.verify_end();
6657 } 6742 }
6658 6743
6659 // Optimized nmethod scanning 6744 // Optimized nmethod scanning
6660 6745
6661 class RegisterNMethodOopClosure: public OopClosure { 6746 class RegisterNMethodOopClosure: public OopClosure {
6750 MigrateCodeRootsHeapRegionClosure cl; 6835 MigrateCodeRootsHeapRegionClosure cl;
6751 double migrate_start = os::elapsedTime(); 6836 double migrate_start = os::elapsedTime();
6752 collection_set_iterate(&cl); 6837 collection_set_iterate(&cl);
6753 double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0; 6838 double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
6754 g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms); 6839 g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
6840 }
6841
6842 void G1CollectedHeap::purge_code_root_memory() {
6843 double purge_start = os::elapsedTime();
6844 G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
6845 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
6846 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
6755 } 6847 }
6756 6848
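purge_code_root_memory() times a call to G1CodeRootSet::purge_chunks() with the G1CodeRootsChunkCacheKeepPercent knob and reports the duration in milliseconds, the same elapsedTime * 1000.0 bookkeeping used throughout this file. The sketch below is only an illustration of a keep-some-percentage chunk-cache purge plus that timing shape; the actual purge policy inside G1CodeRootSet is not shown in this changeset.

#include <chrono>
#include <cstdio>
#include <vector>

struct Chunk { char data[256]; };

// Toy chunk cache: free pooled chunks until only keep_percent of them remain.
// (Illustrative only; the real G1CodeRootSet policy is not spelled out here.)
static std::vector<Chunk*> g_chunk_cache;

void purge_chunk_cache(size_t keep_percent) {
  size_t keep = g_chunk_cache.size() * keep_percent / 100;
  while (g_chunk_cache.size() > keep) {
    delete g_chunk_cache.back();
    g_chunk_cache.pop_back();
  }
}

int main() {
  for (int i = 0; i < 100; i++) g_chunk_cache.push_back(new Chunk());

  // Same timing shape as purge_code_root_memory(): seconds * 1000.0 -> ms.
  auto start = std::chrono::steady_clock::now();
  purge_chunk_cache(10);                     // keep 10% of the cached chunks
  double purge_time_ms =
      std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count() * 1000.0;

  std::printf("purged down to %zu chunks in %.3f ms\n",
              g_chunk_cache.size(), purge_time_ms);
  purge_chunk_cache(0);                      // clean up what is left
  return 0;
}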
6757 // Mark all the code roots that point into regions *not* in the 6849 // Mark all the code roots that point into regions *not* in the
6758 // collection set. 6850 // collection set.
6759 // 6851 //
6822 HeapRegionRemSet* hrrs = hr->rem_set(); 6914 HeapRegionRemSet* hrrs = hr->rem_set();
6823 if (hr->continuesHumongous()) { 6915 if (hr->continuesHumongous()) {
6824 // Code roots should never be attached to a continuation of a humongous region 6916 // Code roots should never be attached to a continuation of a humongous region
6825 assert(hrrs->strong_code_roots_list_length() == 0, 6917 assert(hrrs->strong_code_roots_list_length() == 0,
6826 err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT 6918 err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
6827 " starting at "HR_FORMAT", but has "INT32_FORMAT, 6919 " starting at "HR_FORMAT", but has "SIZE_FORMAT,
6828 HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()), 6920 HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
6829 hrrs->strong_code_roots_list_length())); 6921 hrrs->strong_code_roots_list_length()));
6830 return false; 6922 return false;
6831 } 6923 }
6832 6924