comparison src/share/vm/memory/genCollectedHeap.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents bbc900c2482a
children 4202510ee0fe
--- a/src/share/vm/memory/genCollectedHeap.cpp (6724:36d1d483d5d6)
+++ b/src/share/vm/memory/genCollectedHeap.cpp (6725:da91efe96a93)
28 #include "classfile/vmSymbols.hpp" 28 #include "classfile/vmSymbols.hpp"
29 #include "code/icBuffer.hpp" 29 #include "code/icBuffer.hpp"
30 #include "gc_implementation/shared/collectorCounters.hpp" 30 #include "gc_implementation/shared/collectorCounters.hpp"
31 #include "gc_implementation/shared/vmGCOperations.hpp" 31 #include "gc_implementation/shared/vmGCOperations.hpp"
32 #include "gc_interface/collectedHeap.inline.hpp" 32 #include "gc_interface/collectedHeap.inline.hpp"
33 #include "memory/compactPermGen.hpp"
34 #include "memory/filemap.hpp" 33 #include "memory/filemap.hpp"
35 #include "memory/gcLocker.inline.hpp" 34 #include "memory/gcLocker.inline.hpp"
36 #include "memory/genCollectedHeap.hpp" 35 #include "memory/genCollectedHeap.hpp"
37 #include "memory/genOopClosures.inline.hpp" 36 #include "memory/genOopClosures.inline.hpp"
38 #include "memory/generation.inline.hpp" 37 #include "memory/generation.inline.hpp"
39 #include "memory/generationSpec.hpp" 38 #include "memory/generationSpec.hpp"
40 #include "memory/permGen.hpp"
41 #include "memory/resourceArea.hpp" 39 #include "memory/resourceArea.hpp"
42 #include "memory/sharedHeap.hpp" 40 #include "memory/sharedHeap.hpp"
43 #include "memory/space.hpp" 41 #include "memory/space.hpp"
44 #include "oops/oop.inline.hpp" 42 #include "oops/oop.inline.hpp"
45 #include "oops/oop.inline2.hpp" 43 #include "oops/oop.inline2.hpp"
@@ -78,11 +76,10 @@
   if (_gen_process_strong_tasks == NULL ||
       !_gen_process_strong_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
   }
   assert(policy != NULL, "Sanity check");
-  _preloading_shared_classes = false;
 }
 
 jint GenCollectedHeap::initialize() {
   CollectedHeap::pre_initialize();
 
@@ -98,27 +95,10 @@
 
   // The heap must be at least as aligned as generations.
   size_t alignment = Generation::GenGrain;
 
   _gen_specs = gen_policy()->generations();
-  PermanentGenerationSpec *perm_gen_spec =
-                            collector_policy()->permanent_generation();
 
   // Make sure the sizes are all aligned.
   for (i = 0; i < _n_gens; i++) {
     _gen_specs[i]->align(alignment);
-  }
-  perm_gen_spec->align(alignment);
-
-  // If we are dumping the heap, then allocate a wasted block of address
-  // space in order to push the heap to a lower address. This extra
-  // address range allows for other (or larger) libraries to be loaded
-  // without them occupying the space required for the shared spaces.
-
-  if (DumpSharedSpaces) {
-    uintx reserved = 0;
-    uintx block_size = 64*1024*1024;
-    while (reserved < SharedDummyBlockSize) {
-      char* dummy = os::reserve_memory(block_size);
-      reserved += block_size;
-    }
   }
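
The block removed above existed only for CDS dump time: it burned through address space in 64M chunks so the heap, and the shared archive below it, would land at lower addresses, leaving room above for native libraries. A minimal standalone sketch of the same address-space padding idea, using POSIX mmap instead of HotSpot's os::reserve_memory (the 256M total is an illustrative stand-in for SharedDummyBlockSize):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t block_size  = 64 * 1024 * 1024;   // 64M chunks, as in the deleted loop
      const size_t dummy_total = 256 * 1024 * 1024;  // stand-in for SharedDummyBlockSize
      size_t reserved = 0;
      while (reserved < dummy_total) {
        // PROT_NONE: consume address space only; the pages are never touched.
        void* dummy = mmap(nullptr, block_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (dummy == MAP_FAILED) break;  // best effort, like the original
        reserved += block_size;
      }
      printf("padded %zu bytes of address space\n", reserved);
      return 0;  // mappings are deliberately leaked; this runs at dump time only
    }
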
@@ -125,27 +105,15 @@
 
   // Allocate space for the heap.
 
   char* heap_address;
   size_t total_reserved = 0;
   int n_covered_regions = 0;
   ReservedSpace heap_rs(0);
 
-  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
+  heap_address = allocate(alignment, &total_reserved,
                           &n_covered_regions, &heap_rs);
-
-  if (UseSharedSpaces) {
-    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
-      if (heap_rs.is_reserved()) {
-        heap_rs.release();
-      }
-      FileMapInfo* mapinfo = FileMapInfo::current_info();
-      mapinfo->fail_continue("Unable to reserve shared region.");
-      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
-               &heap_rs);
-    }
-  }
 
   if (!heap_rs.is_reserved()) {
     vm_shutdown_during_initialization(
       "Could not reserve enough space for object heap");
     return JNI_ENOMEM;
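
The UseSharedSpaces branch deleted above implemented a reserve-then-retry dance: if the heap reservation did not land at the base address the shared archive required, it was released and reserved again without the constraint. A sketch of that pattern under POSIX; reserve_with_fallback is a hypothetical name, not HotSpot API:

    #include <sys/mman.h>
    #include <cstddef>

    // Try to reserve 'size' bytes at 'hint'; if the kernel places the mapping
    // elsewhere, give it back and retry with no placement constraint.
    static void* reserve_with_fallback(void* hint, size_t size) {
      void* p = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return nullptr;
      if (hint != nullptr && p != hint) {
        munmap(p, size);  // wrong address: release, as the deleted code did...
        p = mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) return nullptr;  // ...then take any address
      }
      return p;
    }
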
@@ -156,27 +124,23 @@
 
   // It is important to do this in a way such that concurrent readers can't
   // temporarily think somethings in the heap. (Seen this happen in asserts.)
   _reserved.set_word_size(0);
   _reserved.set_start((HeapWord*)heap_rs.base());
-  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
-                                           - perm_gen_spec->misc_code_size();
+  size_t actual_heap_size = heap_rs.size();
   _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
 
   _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
   set_barrier_set(rem_set()->bs());
 
   _gch = this;
 
   for (i = 0; i < _n_gens; i++) {
-    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
-                                               UseSharedSpaces, UseSharedSpaces);
+    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
     _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
     heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
   }
-  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
-
   clear_incremental_collection_failed();
 
 #ifndef SERIALGC
   // If we are running CMS, create the collector responsible
   // for collecting the CMS generations.
@@ -189,11 +153,10 @@
   return JNI_OK;
 }
 
 
 char* GenCollectedHeap::allocate(size_t alignment,
-                                 PermanentGenerationSpec* perm_gen_spec,
                                  size_t* _total_reserved,
                                  int* _n_covered_regions,
                                  ReservedSpace* heap_rs){
   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                               "the maximum representable size";
@@ -212,87 +175,27 @@
     n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
   assert(total_reserved % pageSize == 0,
          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
                  SIZE_FORMAT, total_reserved, pageSize));
-  total_reserved += perm_gen_spec->max_size();
-  assert(total_reserved % pageSize == 0,
-         err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
-                 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
-                 pageSize, perm_gen_spec->max_size()));
-
-  if (total_reserved < perm_gen_spec->max_size()) {
-    vm_exit_during_initialization(overflow_msg);
-  }
-  n_covered_regions += perm_gen_spec->n_covered_regions();
-
-  // Add the size of the data area which shares the same reserved area
-  // as the heap, but which is not actually part of the heap.
-  size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
-
-  total_reserved += s;
-  if (total_reserved < s) {
-    vm_exit_during_initialization(overflow_msg);
-  }
+
+  // Needed until the cardtable is fixed to have the right number
+  // of covered regions.
+  n_covered_regions += 2;
 
   if (UseLargePages) {
     assert(total_reserved != 0, "total_reserved cannot be 0");
     total_reserved = round_to(total_reserved, os::large_page_size());
     if (total_reserved < os::large_page_size()) {
       vm_exit_during_initialization(overflow_msg);
     }
   }
 
-  // Calculate the address at which the heap must reside in order for
-  // the shared data to be at the required address.
-
-  char* heap_address;
-  if (UseSharedSpaces) {
-
-    // Calculate the address of the first word beyond the heap.
-    FileMapInfo* mapinfo = FileMapInfo::current_info();
-    int lr = CompactingPermGenGen::n_regions - 1;
-    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
-    heap_address = mapinfo->region_base(lr) + capacity;
-
-    // Calculate the address of the first word of the heap.
-    heap_address -= total_reserved;
-  } else {
-    heap_address = NULL;  // any address will do.
-    if (UseCompressedOops) {
-      heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
-      *_total_reserved = total_reserved;
-      *_n_covered_regions = n_covered_regions;
-      *heap_rs = ReservedHeapSpace(total_reserved, alignment,
-                                   UseLargePages, heap_address);
-
-      if (heap_address != NULL && !heap_rs->is_reserved()) {
-        // Failed to reserve at specified address - the requested memory
-        // region is taken already, for example, by 'java' launcher.
-        // Try again to reserver heap higher.
-        heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
-        *heap_rs = ReservedHeapSpace(total_reserved, alignment,
-                                     UseLargePages, heap_address);
-
-        if (heap_address != NULL && !heap_rs->is_reserved()) {
-          // Failed to reserve at specified address again - give up.
-          heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
-          assert(heap_address == NULL, "");
-          *heap_rs = ReservedHeapSpace(total_reserved, alignment,
-                                       UseLargePages, heap_address);
-        }
-      }
-      return heap_address;
-    }
-  }
-
-  *_total_reserved = total_reserved;
-  *_n_covered_regions = n_covered_regions;
-  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
-                               UseLargePages, heap_address);
-
-  return heap_address;
+  *_total_reserved = total_reserved;
+  *_n_covered_regions = n_covered_regions;
+  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
+  return heap_rs->base();
 }
 
 
 void GenCollectedHeap::post_initialize() {
   SharedHeap::post_initialize();
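
Both the deleted perm gen accounting and the surviving large-page path lean on one idiom: after an unsigned addition, a sum smaller than an operand means the value wrapped, so initialization must abort rather than reserve a bogus size. A self-contained sketch of that check plus the power-of-two round-up that round_to performs (sizes here are illustrative):

    #include <cstdio>
    #include <cstdlib>

    // Overflow-checked accumulation, the "total_reserved < s" idiom above.
    static size_t checked_add(size_t total, size_t s) {
      size_t sum = total + s;
      if (sum < s) {  // unsigned wrap-around
        fprintf(stderr, "object heap + VM data exceeds the maximum representable size\n");
        exit(1);
      }
      return sum;
    }

    // Round up to a power-of-two alignment, e.g. the large page size.
    static size_t round_up_pow2(size_t n, size_t alignment) {
      return (n + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      size_t total = 0;
      total = checked_add(total, 512u * 1024 * 1024);  // young + old gens, say
      total = round_up_pow2(total, 2u * 1024 * 1024);  // 2M large pages
      printf("reserve %zu bytes\n", total);
      return 0;
    }
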
@@ -338,19 +241,15 @@
     res += _gens[i]->used();
   }
   return res;
 }
 
-// Save the "used_region" for generations level and lower,
-// and, if perm is true, for perm gen.
-void GenCollectedHeap::save_used_regions(int level, bool perm) {
+// Save the "used_region" for generations level and lower.
+void GenCollectedHeap::save_used_regions(int level) {
   assert(level < _n_gens, "Illegal level parameter");
   for (int i = level; i >= 0; i--) {
     _gens[i]->save_used_region();
-  }
-  if (perm) {
-    perm_gen()->save_used_region();
   }
 }
 
 size_t GenCollectedHeap::max_capacity() const {
   size_t res = 0;
@@ -475,11 +374,11 @@
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
     collector_policy()->should_clear_all_soft_refs();
 
   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 
-  const size_t perm_prev_used = perm_gen()->used();
+  const size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
 
   print_heap_before_gc();
 
   {
     FlagSetting fl(_is_gc_active, true);
@@ -640,31 +539,36 @@
     }
 
     if (PrintGCDetails) {
       print_heap_change(gch_prev_used);
 
-      // Print perm gen info for full GC with PrintGCDetails flag.
+      // Print metaspace info for full GC with PrintGCDetails flag.
       if (complete) {
-        print_perm_heap_change(perm_prev_used);
+        MetaspaceAux::print_metaspace_change(metadata_prev_used);
       }
     }
 
     for (int j = max_level_collected; j >= 0; j -= 1) {
       // Adjust generation sizes.
       _gens[j]->compute_new_size();
     }
 
     if (complete) {
-      // Ask the permanent generation to adjust size for full collections
-      perm()->compute_new_size();
+      // Resize the metaspace capacity after full collections
+      MetaspaceGC::compute_new_size();
       update_full_collections_completed();
     }
 
     // Track memory usage and detect low memory after GC finishes
     MemoryService::track_memory_usage();
 
     gc_epilogue(complete);
+
+    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
+    if (complete) {
+      ClassLoaderDataGraph::purge();
+    }
 
     if (must_restore_marks_for_biased_locking) {
       BiasedLocking::restore_marks();
     }
   }
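
The new ClassLoaderDataGraph::purge() call is the payoff of the changeset's ownership model: metadata lives in a metaspace owned by its class loader, so once a full collection proves a loader dead, all of its metadata can be dropped in one sweep instead of being collected object-by-object out of a perm gen. A toy model of that rule; every name below is illustrative, not HotSpot's:

    #include <algorithm>
    #include <cstddef>
    #include <memory>
    #include <vector>

    // Metadata arena owned by one loader; freeing the arena frees everything in it.
    struct MetadataArena {
      std::vector<std::unique_ptr<char[]>> blocks;
      char* allocate(size_t n) {
        blocks.push_back(std::unique_ptr<char[]>(new char[n]));
        return blocks.back().get();
      }
    };

    struct LoaderData {
      bool alive = true;        // set by the collector's liveness marking
      MetadataArena metaspace;  // all metadata for this loader
    };

    // Analogue of ClassLoaderDataGraph::purge(): one pass after a full GC,
    // unlinking dead loaders and releasing their whole metaspace at once.
    static void purge(std::vector<std::unique_ptr<LoaderData>>& graph) {
      graph.erase(std::remove_if(graph.begin(), graph.end(),
                                 [](const std::unique_ptr<LoaderData>& cld) {
                                   return !cld->alive;
                                 }),
                  graph.end());
    }
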
@@ -690,25 +594,26 @@
 
 void GenCollectedHeap::
 gen_process_strong_roots(int level,
                          bool younger_gens_as_roots,
                          bool activate_scope,
-                         bool collecting_perm_gen,
+                         bool is_scavenging,
                          SharedHeap::ScanningOption so,
                          OopsInGenClosure* not_older_gens,
                          bool do_code_roots,
-                         OopsInGenClosure* older_gens) {
+                         OopsInGenClosure* older_gens,
+                         KlassClosure* klass_closure) {
   // General strong roots.
 
   if (!do_code_roots) {
-    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
-                                     not_older_gens, NULL, older_gens);
+    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
+                                     not_older_gens, NULL, klass_closure);
   } else {
     bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
     CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
-    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
-                                     not_older_gens, &code_roots, older_gens);
+    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
+                                     not_older_gens, &code_roots, klass_closure);
   }
 
   if (younger_gens_as_roots) {
     if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
       for (int i = 0; i < level; i++) {
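
With no perm gen to scan, classes reach the collector through the new klass_closure parameter: root processing invokes a per-Klass callback instead of walking a heap region. A sketch of that closure protocol (the types below are illustrative reductions, not HotSpot's definitions):

    #include <vector>

    struct Klass;  // opaque here

    // The callback contract threaded through gen_process_strong_roots().
    struct KlassClosure {
      virtual void do_klass(Klass* k) = 0;
      virtual ~KlassClosure() {}
    };

    struct CountingKlassClosure : KlassClosure {
      int count = 0;
      void do_klass(Klass*) override { ++count; }
    };

    // A root processor presents every known class to the closure.
    static void walk_class_roots(const std::vector<Klass*>& classes,
                                 KlassClosure* cl) {
      for (Klass* k : classes) {
        cl->do_klass(k);
      }
    }
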
@@ -746,22 +651,21 @@
                                           OopClosureType* older) {    \
   _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);         \
   for (int i = level+1; i < n_gens(); i++) {                          \
     _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);         \
   }                                                                   \
-  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);         \
 }
 
 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
 
 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
 
 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
   for (int i = level; i < _n_gens; i++) {
     if (!_gens[i]->no_allocs_since_save_marks()) return false;
   }
-  return perm_gen()->no_allocs_since_save_marks();
+  return true;
 }
 
 bool GenCollectedHeap::supports_inline_contig_alloc() const {
   return _gens[0]->supports_inline_contig_alloc();
 }
@@ -809,44 +713,20 @@
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
   MutexLocker ml(Heap_lock);
   collect_locked(cause, max_level);
 }
 
-// This interface assumes that it's being called by the
-// vm thread. It collects the heap assuming that the
-// heap lock is already held and that we are executing in
-// the context of the vm thread.
-void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
-  assert(Thread::current()->is_VM_thread(), "Precondition#1");
-  assert(Heap_lock->is_locked(), "Precondition#2");
-  GCCauseSetter gcs(this, cause);
-  switch (cause) {
-    case GCCause::_heap_inspection:
-    case GCCause::_heap_dump: {
-      HandleMark hm;
-      do_full_collection(false,        // don't clear all soft refs
-                         n_gens() - 1);
-      break;
-    }
-    default: // XXX FIX ME
-      ShouldNotReachHere(); // Unexpected use of this function
-  }
-}
-
 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
   // The caller has the Heap_lock
   assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
   collect_locked(cause, n_gens() - 1);
 }
 
 // this is the private collection interface
 // The Heap_lock is expected to be held on entry.
 
 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
-  if (_preloading_shared_classes) {
-    report_out_of_shared_space(SharedPermGen);
-  }
   // Read the GC count while holding the Heap_lock
   unsigned int gc_count_before      = total_collections();
   unsigned int full_gc_count_before = total_full_collections();
   {
     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
@@ -858,18 +738,16 @@
 
 #ifndef SERIALGC
 bool GenCollectedHeap::create_cms_collector() {
 
   assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
-          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
-         _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
+          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
          "Unexpected generation kinds");
   // Skip two header words in the block content verification
   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
   CMSCollector* collector = new CMSCollector(
     (ConcurrentMarkSweepGeneration*)_gens[1],
-    (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
     _rem_set->as_CardTableRS(),
     (ConcurrentMarkSweepPolicy*) collector_policy());
 
   if (collector == NULL || !collector->completed_initialization()) {
     if (collector) {
@@ -894,10 +772,13 @@
     VMThread::execute(&op);
   }
 }
 #endif // SERIALGC
 
+void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
+  do_full_collection(clear_all_soft_refs, _n_gens - 1);
+}
 
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                           int max_level) {
   int local_max_level;
   if (!incremental_collection_will_fail(false /* don't consult_young */) &&
@@ -952,50 +833,46 @@
   // This might be sped up with a cache of the last generation that
   // answered yes.
   for (int i = 0; i < _n_gens; i++) {
     if (_gens[i]->is_in(p)) return true;
   }
-  if (_perm_gen->as_gen()->is_in(p)) return true;
   // Otherwise...
   return false;
 }
 
 #ifdef ASSERT
 // Don't implement this by using is_in_young(). This method is used
 // in some cases to check that is_in_young() is correct.
 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
   assert(is_in_reserved(p) || p == NULL,
          "Does not work if address is non-null and outside of the heap");
-  // The order of the generations is young (low addr), old, perm (high addr)
   return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
 }
 #endif
 
-void GenCollectedHeap::oop_iterate(OopClosure* cl) {
+void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->oop_iterate(cl);
   }
 }
 
-void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
+void GenCollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->oop_iterate(mr, cl);
   }
 }
 
 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->object_iterate(cl);
   }
-  perm_gen()->object_iterate(cl);
 }
 
 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->safe_object_iterate(cl);
   }
-  perm_gen()->safe_object_iterate(cl);
 }
 
 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->object_iterate_since_last_GC(cl);
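
The iterate entry points now take an ExtendedOopClosure: conceptually, an oop visitor that can also be asked whether to follow each object's metadata, since classes are no longer plain heap objects found through oops. An illustrative reduction of that split (not HotSpot's actual declarations):

    struct oopDesc; typedef oopDesc* oop;
    struct Klass;

    struct OopClosure {
      virtual void do_oop(oop* p) = 0;
      virtual ~OopClosure() {}
    };

    // Adds the metadata hooks: collectors that trace class metadata answer
    // true from do_metadata() and receive do_klass() callbacks; others opt out.
    struct ExtendedOopClosure : OopClosure {
      virtual bool do_metadata() { return false; }
      virtual void do_klass(Klass* k) { (void)k; }
    };
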
@@ -1005,12 +882,10 @@
 Space* GenCollectedHeap::space_containing(const void* addr) const {
   for (int i = 0; i < _n_gens; i++) {
     Space* res = _gens[i]->space_containing(addr);
     if (res != NULL) return res;
   }
-  Space* res = perm_gen()->space_containing(addr);
-  if (res != NULL) return res;
   // Otherwise...
   assert(false, "Could not find containing space");
   return NULL;
 }
 
@@ -1022,15 +897,10 @@
       assert(_gens[i]->is_in(addr),
              "addr should be in allocated part of generation");
       return _gens[i]->block_start(addr);
     }
   }
-  if (perm_gen()->is_in_reserved(addr)) {
-    assert(perm_gen()->is_in(addr),
-           "addr should be in allocated part of perm gen");
-    return perm_gen()->block_start(addr);
-  }
   assert(false, "Some generation should contain the address");
   return NULL;
 }
 
 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
@@ -1040,15 +910,10 @@
       assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
       return _gens[i]->block_size(addr);
     }
   }
-  if (perm_gen()->is_in_reserved(addr)) {
-    assert(perm_gen()->is_in(addr),
-           "addr should be in allocated part of perm gen");
-    return perm_gen()->block_size(addr);
-  }
   assert(false, "Some generation should contain the address");
   return 0;
 }
 
 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
@@ -1056,13 +921,10 @@
   assert(block_start(addr) == addr, "addr must be a block start");
   for (int i = 0; i < _n_gens; i++) {
     if (_gens[i]->is_in_reserved(addr)) {
       return _gens[i]->block_is_obj(addr);
     }
-  }
-  if (perm_gen()->is_in_reserved(addr)) {
-    return perm_gen()->block_is_obj(addr);
   }
   assert(false, "Some generation should contain the address");
   return false;
 }
 
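
With the perm gen special cases gone, block_start, block_size, and block_is_obj share one shape: find the generation whose reserved range contains the address, delegate the query, and assert if nothing claims it. A generic sketch of that containment dispatch:

    #include <cassert>
    #include <cstddef>

    struct Range {
      const char* lo;
      const char* hi;
      bool contains(const void* p) const { return p >= lo && p < hi; }
    };

    struct Space {
      Range reserved;
      // Placeholder query; stands in for block_size()/block_start()/block_is_obj().
      size_t block_size_at(const void*) const { return 8; }
    };

    static size_t block_size(Space* spaces, int n, const void* addr) {
      for (int i = 0; i < n; i++) {
        if (spaces[i].reserved.contains(addr)) {
          return spaces[i].block_size_at(addr);  // delegate to the owning space
        }
      }
      assert(false && "Some generation should contain the address");
      return 0;
    }
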
@@ -1162,11 +1024,10 @@
 
 void GenCollectedHeap::prepare_for_verify() {
   ensure_parsability(false);        // no need to retire TLABs
   GenPrepareForVerifyClosure blk;
   generation_iterate(&blk, false);
-  perm_gen()->prepare_for_verify();
 }
 
 
 void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                           bool old_to_young) {
@@ -1183,15 +1044,14 @@
 
 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->space_iterate(cl, true);
   }
-  perm_gen()->space_iterate(cl, true);
 }
 
 bool GenCollectedHeap::is_maximal_no_gc() const {
-  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
+  for (int i = 0; i < _n_gens; i++) {
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
@@ -1199,11 +1059,10 @@
 
 void GenCollectedHeap::save_marks() {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->save_marks();
   }
-  perm_gen()->save_marks();
 }
 
 void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
   for (int i = 0; i <= collectedGen; i++) {
     _gens[i]->compute_new_size();
@@ -1230,14 +1089,10 @@
 GCStats* GenCollectedHeap::gc_stats(int level) const {
   return _gens[level]->gc_stats();
 }
 
 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
-  if (!silent) {
-    gclog_or_tty->print("permgen ");
-  }
-  perm_gen()->verify();
   for (int i = _n_gens-1; i >= 0; i--) {
     Generation* g = _gens[i];
     if (!silent) {
       gclog_or_tty->print(g->name());
       gclog_or_tty->print(" ");
@@ -1252,11 +1107,11 @@
 
 void GenCollectedHeap::print_on(outputStream* st) const {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->print_on(st);
   }
-  perm_gen()->print_on(st);
+  MetaspaceAux::print_on(st);
 }
 
 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
   if (workers() != NULL) {
     workers()->threads_do(tc);
1300 "(" SIZE_FORMAT "K)", 1155 "(" SIZE_FORMAT "K)",
1301 prev_used / K, used() / K, capacity() / K); 1156 prev_used / K, used() / K, capacity() / K);
1302 } 1157 }
1303 } 1158 }
1304 1159
1305 //New method to print perm gen info with PrintGCDetails flag
1306 void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
1307 gclog_or_tty->print(", [%s :", perm_gen()->short_name());
1308 perm_gen()->print_heap_change(perm_prev_used);
1309 gclog_or_tty->print("]");
1310 }
1311
1312 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure { 1160 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
1313 private: 1161 private:
1314 bool _full; 1162 bool _full;
1315 public: 1163 public:
1316 void do_generation(Generation* gen) { 1164 void do_generation(Generation* gen) {
@@ -1330,11 +1178,10 @@
   // Call allocation profiler
   AllocationProfiler::iterate_since_last_gc();
   // Walk generations
   GenGCPrologueClosure blk(full);
   generation_iterate(&blk, false);  // not old-to-young.
-  perm_gen()->gc_prologue(full);
 };
 
 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
  private:
   bool _full;
@@ -1354,15 +1201,16 @@
 
   resize_all_tlabs();
 
   GenGCEpilogueClosure blk(full);
   generation_iterate(&blk, false);  // not old-to-young.
-  perm_gen()->gc_epilogue(full);
 
   if (!CleanChunkPoolAsync) {
     Chunk::clean_chunk_pool();
   }
+
+  MetaspaceCounters::update_performance_counters();
 
   always_do_update_barrier = UseConcMarkSweepGC;
 };
 
 #ifndef PRODUCT
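
gc_prologue and gc_epilogue drive per-generation hooks through a small closure object; with the trailing perm_gen() calls gone, the generation walk is the whole story. An illustrative reduction of that GenClosure pattern:

    struct Generation {
      virtual void gc_epilogue(bool full) { (void)full; }
      virtual ~Generation() {}
    };

    struct GenClosure {
      virtual void do_generation(Generation* gen) = 0;
      virtual ~GenClosure() {}
    };

    struct GenGCEpilogueClosure : GenClosure {
      bool full;
      explicit GenGCEpilogueClosure(bool f) : full(f) {}
      void do_generation(Generation* gen) override { gen->gc_epilogue(full); }
    };

    static void generation_iterate(Generation** gens, int n, GenClosure* cl) {
      for (int i = 0; i < n; i++) {
        cl->do_generation(gens[i]);  // apply the hook to each generation in turn
      }
    }
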
@@ -1376,11 +1224,10 @@
 
 void GenCollectedHeap::record_gen_tops_before_GC() {
   if (ZapUnusedHeapArea) {
     GenGCSaveTopsBeforeGCClosure blk;
     generation_iterate(&blk, false);  // not old-to-young.
-    perm_gen()->record_spaces_top();
   }
 }
 #endif  // not PRODUCT
 
 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
@@ -1392,11 +1239,10 @@
 
 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
   CollectedHeap::ensure_parsability(retire_tlabs);
   GenEnsureParsabilityClosure ep_cl;
   generation_iterate(&ep_cl, false);
-  perm_gen()->ensure_parsability();
 }
 
 oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                               oop obj,
                                               size_t obj_size) {
@@ -1445,11 +1291,10 @@
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   GenTimeOfLastGCClosure tolgc_cl(now);
   // iterate over generations getting the oldest
   // time that a generation was collected
   generation_iterate(&tolgc_cl, false);
-  tolgc_cl.do_generation(perm_gen());
 
   // javaTimeNanos() is guaranteed to be monotonically non-decreasing
   // provided the underlying platform provides such a time source
   // (and it is bug free). So we still have to guard against getting
   // back a time later than 'now'.
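
The comment above is the whole algorithm: even a monotonic clock read per generation can come back later than the 'now' sampled first, so the reported time of last GC is clamped. A one-function sketch of that guard:

    #include <algorithm>
    #include <cstdint>

    // Guard against a per-generation timestamp exceeding the 'now' sampled earlier.
    static int64_t time_of_last_gc(int64_t oldest_collected_ms, int64_t now_ms) {
      return std::min(oldest_collected_ms, now_ms);
    }
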