comparison src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents d2a62e0f25eb
children e861d44e0c9c
@@ -34,20 +34,19 @@
 #include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
-#include "gc_implementation/parallelScavenge/psPermGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_interface/gcCause.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/referenceProcessor.hpp"
-#include "oops/methodDataOop.hpp"
+#include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/vmThread.hpp"
@@ -88,11 +87,11 @@

 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 bool PSParallelCompact::_print_phases = false;

 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
-klassOop PSParallelCompact::_updated_int_array_klass_obj = NULL;
+Klass* PSParallelCompact::_updated_int_array_klass_obj = NULL;

 double PSParallelCompact::_dwl_mean;
 double PSParallelCompact::_dwl_std_dev;
 double PSParallelCompact::_dwl_first_term;
 double PSParallelCompact::_dwl_adjustment;
@@ -104,11 +103,10 @@
 GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
 GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
 GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
 GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
 size_t PSParallelCompact::_live_oops_index = 0;
-size_t PSParallelCompact::_live_oops_index_at_perm = 0;
 GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
 GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
 bool PSParallelCompact::_pointer_tracking = false;
 bool PSParallelCompact::_root_tracking = true;

@@ -186,11 +184,11 @@
 #endif // #ifdef ASSERT


 #ifndef PRODUCT
 const char* PSParallelCompact::space_names[] = {
-  "perm", "old ", "eden", "from", "to "
+  "old ", "eden", "from", "to "
 };

 void PSParallelCompact::print_region_ranges()
 {
   tty->print_cr("space bottom top end new_top");
@@ -345,11 +343,11 @@
 }

 void
 print_initial_summary_data(ParallelCompactData& summary_data,
                            SpaceInfo* space_info) {
-  unsigned int id = PSParallelCompact::perm_space_id;
+  unsigned int id = PSParallelCompact::old_space_id;
   const MutableSpace* space;
   do {
     space = space_info[id].space();
     print_initial_summary_data(summary_data, space);
   } while (++id < PSParallelCompact::eden_space_id);
@@ -478,11 +476,11 @@

   // First region.
   const size_t beg_ofs = region_offset(addr);
   _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);

-  klassOop klass = ((oop)addr)->klass();
+  Klass* klass = ((oop)addr)->klass();
   // Middle regions--completely spanned by this object.
   for (size_t region = beg_region + 1; region < end_region; ++region) {
     _region_data[region].set_partial_obj_size(RegionSize);
     _region_data[region].set_partial_obj_addr(addr);
   }
@@ -763,21 +761,10 @@
   result += partial_obj_size + live_to_left;
   DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
   return result;
 }

-klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
-  klassOop updated_klass;
-  if (PSParallelCompact::should_update_klass(old_klass)) {
-    updated_klass = (klassOop) calc_new_pointer(old_klass);
-  } else {
-    updated_klass = old_klass;
-  }
-
-  return updated_klass;
-}
-
 #ifdef ASSERT
 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
 {
   const size_t* const beg = (const size_t*)vspace->committed_low_addr();
   const size_t* const end = (const size_t*)vspace->committed_high_addr();
@@ -815,18 +802,28 @@
 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }

 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
+PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;

 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }

 void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }

-void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(_compaction_manager, p); }
+void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) {
+  mark_and_push(_compaction_manager, p);
+}
 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
+
+void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
+  klass->oops_do(_mark_and_push_closure);
+}
+void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
+  klass->oops_do(&PSParallelCompact::_adjust_root_pointer_closure);
+}

 void PSParallelCompact::post_initialize() {
   ParallelScavengeHeap* heap = gc_heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

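The new do_klass closures above share one delegation pattern: a Klass visitor holds an oop visitor and forwards each klass's embedded references to it, so the GC visits the oops a klass points at without moving the klass itself. A minimal standalone sketch of that pattern (illustrative types, not HotSpot's):

    // Stand-in for a heap reference; HotSpot's oop is more involved.
    struct Oop {};

    struct OopClosure {
      virtual void do_oop(Oop** p) = 0;
      virtual ~OopClosure() {}
    };

    struct Klass {
      Oop* mirror;  // e.g. the java.lang.Class instance this klass points at
      void oops_do(OopClosure* cl) { cl->do_oop(&mirror); }
    };

    // Same shape as FollowKlassClosure::do_klass above: process the oops
    // embedded in the metadata, not the metadata itself.
    struct KlassClosure {
      OopClosure* _oops;
      explicit KlassClosure(OopClosure* oops) : _oops(oops) {}
      void do_klass(Klass* k) { k->oops_do(_oops); }
    };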
@@ -878,26 +875,17 @@
 {
   memset(&_space_info, 0, sizeof(_space_info));

   ParallelScavengeHeap* heap = gc_heap();
   PSYoungGen* young_gen = heap->young_gen();
-  MutableSpace* perm_space = heap->perm_gen()->object_space();

-  _space_info[perm_space_id].set_space(perm_space);
   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
   _space_info[eden_space_id].set_space(young_gen->eden_space());
   _space_info[from_space_id].set_space(young_gen->from_space());
   _space_info[to_space_id].set_space(young_gen->to_space());

-  _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
-
-  _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
-  if (TraceParallelOldGCDensePrefix) {
-    tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
-                  _space_info[perm_space_id].min_dense_prefix());
-  }
 }

 void PSParallelCompact::initialize_dead_wood_limiter()
 {
   const size_t max = 100;
@@ -917,23 +905,23 @@

   void fill(ParallelScavengeHeap* heap) {
     _heap_used      = heap->used();
     _young_gen_used = heap->young_gen()->used_in_bytes();
     _old_gen_used   = heap->old_gen()->used_in_bytes();
-    _perm_gen_used  = heap->perm_gen()->used_in_bytes();
+    _metadata_used  = MetaspaceAux::used_in_bytes();
   };

   size_t heap_used() const      { return _heap_used; }
   size_t young_gen_used() const { return _young_gen_used; }
   size_t old_gen_used() const   { return _old_gen_used; }
-  size_t perm_gen_used() const  { return _perm_gen_used; }
+  size_t metadata_used() const  { return _metadata_used; }

  private:
   size_t _heap_used;
   size_t _young_gen_used;
   size_t _old_gen_used;
-  size_t _perm_gen_used;
+  size_t _metadata_used;
 };

 void
 PSParallelCompact::clear_data_covering_space(SpaceId id)
 {
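The class above is a snapshot-then-report helper: byte counts are captured before the collection so deltas can be printed afterwards, with the metaspace counter now taking the perm gen's slot. A rough sketch of the pattern (simplified types, not the HotSpot API):

    #include <cstdio>
    #include <cstddef>

    struct PreGCValues {
      size_t heap_used;
      size_t metadata_used;
    };

    // Same "before->after" style as print_heap_change() and
    // print_metaspace_change() in the hunks further down.
    void report(const PreGCValues& pre, size_t heap_now, size_t meta_now) {
      std::printf("Heap: %zuK->%zuK  Metaspace: %zuK->%zuK\n",
                  pre.heap_used / 1024, heap_now / 1024,
                  pre.metadata_used / 1024, meta_now / 1024);
    }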
@@ -974,11 +962,10 @@
   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   _space_info[to_space_id].set_space(heap->young_gen()->to_space());

   pre_gc_values->fill(heap);

-  ParCompactionManager::reset();
   NOT_PRODUCT(_mark_bitmap.reset_counters());
   DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)

   // Increment the invocation count
@@ -1001,11 +988,10 @@

   // Verify object start arrays
   if (VerifyObjectStartArray &&
       VerifyBeforeGC) {
     heap->old_gen()->verify_object_start_array();
-    heap->perm_gen()->verify_object_start_array();
   }

   DEBUG_ONLY(mark_bitmap()->verify_clear();)
   DEBUG_ONLY(summary_data().verify_clear();)

@@ -1015,11 +1001,11 @@

 void PSParallelCompact::post_compact()
 {
   TraceTime tm("post compact", print_phases(), true, gclog_or_tty);

-  for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
+  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     // Clear the marking bitmap, summary data and split info.
     clear_data_covering_space(SpaceId(id));
     // Update top().  Must be done after clearing the bitmap and summary data.
     _space_info[id].publish_new_top();
   }
@@ -1044,19 +1030,20 @@

   BarrierSet* bs = heap->barrier_set();
   if (bs->is_a(BarrierSet::ModRef)) {
     ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
     MemRegion old_mr = heap->old_gen()->reserved();
-    MemRegion perm_mr = heap->perm_gen()->reserved();
-    assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

     if (young_gen_empty) {
-      modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
+      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
     } else {
-      modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
+      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
     }
   }
+
+  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
+  ClassLoaderDataGraph::purge();

   Threads::gc_epilogue();
   CodeCache::gc_epilogue();
   JvmtiExport::gc_epilogue();

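ClassLoaderDataGraph::purge() is where the new scheme pays off: once marking decides a loader is dead, its whole metaspace is released with it, with no per-object sweep of metadata. A conceptual sketch of that bulk-free idea (illustrative names, not the HotSpot implementation):

    #include <memory>

    struct Metaspace { /* native storage for class metadata */ };

    struct ClassLoaderData {
      bool alive;                            // decided during marking
      std::unique_ptr<Metaspace> metaspace;  // freed when the CLD dies
      ClassLoaderData* next;
    };

    // Unlink and destroy dead entries in the loader-data list.
    ClassLoaderData* purge(ClassLoaderData* head) {
      ClassLoaderData** link = &head;
      while (ClassLoaderData* cld = *link) {
        if (!cld->alive) {
          *link = cld->next;
          delete cld;  // releases the loader's entire metaspace in one step
        } else {
          link = &cld->next;
        }
      }
      return head;
    }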
@@ -1407,12 +1394,11 @@
   const size_t space_live = pointer_delta(new_top, bottom);
   const size_t space_used = space->used_in_words();
   const size_t space_capacity = space->capacity_in_words();

   const double density = double(space_live) / double(space_capacity);
-  const size_t min_percent_free =
-    id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
+  const size_t min_percent_free = MarkSweepDeadRatio;
   const double limiter = dead_wood_limiter(density, min_percent_free);
   const size_t dead_wood_max = space_used - space_live;
   const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
                                       dead_wood_max);

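To make the dead-wood arithmetic above concrete: take a 1,000,000-word space with 900,000 words used and 700,000 live, and suppose dead_wood_limiter() returns 0.05 (an assumed value; the real curve is computed from the density and MarkSweepDeadRatio elsewhere in this file):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t space_capacity = 1000000;  // words
      const size_t space_used     =  900000;
      const size_t space_live     =  700000;
      const double limiter        = 0.05;     // assumed dead_wood_limiter() result

      const size_t dead_wood_max   = space_used - space_live;          // 200000
      const size_t dead_wood_limit =
          std::min(size_t(space_capacity * limiter), dead_wood_max);   // 50000

      // At most 50000 words of dead objects may remain inside the dense
      // prefix; anything beyond that must be compacted away.
      std::printf("dead_wood_limit = %zu words\n", dead_wood_limit);
    }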
@@ -1866,11 +1852,10 @@
     }
   }

   // The amount of live data that will end up in old space (assuming it fits).
   size_t old_space_total_live = 0;
-  assert(perm_space_id < old_space_id, "should not count perm data here");
   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     old_space_total_live += pointer_delta(_space_info[id].new_top(),
                                           _space_info[id].space()->bottom());
   }

@@ -1884,12 +1869,11 @@
   if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
     provoke_split(maximum_compaction);
   }
 #endif // #ifndef PRODUCT

-  // Permanent and Old generations.
-  summarize_space(perm_space_id, maximum_compaction);
+  // Old generations.
   summarize_space(old_space_id, maximum_compaction);

   // Summarize the remaining spaces in the young gen.  The initial target space
   // is the old gen.  If a space does not fit entirely into the target, then the
   // remainder is compacted into the space itself and that space becomes the new
2012 ParallelScavengeHeap* heap = gc_heap(); 1996 ParallelScavengeHeap* heap = gc_heap();
2013 GCCause::Cause gc_cause = heap->gc_cause(); 1997 GCCause::Cause gc_cause = heap->gc_cause();
2014 PSYoungGen* young_gen = heap->young_gen(); 1998 PSYoungGen* young_gen = heap->young_gen();
2015 PSOldGen* old_gen = heap->old_gen(); 1999 PSOldGen* old_gen = heap->old_gen();
2016 PSPermGen* perm_gen = heap->perm_gen();
2017 PSAdaptiveSizePolicy* size_policy = heap->size_policy(); 2000 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
2018 2001
2019 // The scope of casr should end after code that can change 2002 // The scope of casr should end after code that can change
2020 // CollectorPolicy::_should_clear_all_soft_refs. 2003 // CollectorPolicy::_should_clear_all_soft_refs.
2021 ClearedAllSoftRefs casr(maximum_heap_compaction, 2004 ClearedAllSoftRefs casr(maximum_heap_compaction,
@@ -2060,12 +2043,10 @@
     if (TraceGen1Time) accumulated_time()->start();

     // Let the size policy know we're starting
     size_policy->major_collection_begin();

-    // When collecting the permanent generation methodOops may be moving,
-    // so we either have to flush all bcp data or convert it into bci.
     CodeCache::gc_prologue();
     Threads::gc_prologue();

     COMPILER2_PRESENT(DerivedPointerTable::clear());

@@ -2096,14 +2077,10 @@
     // adjust_roots() updates Universe::_intArrayKlassObj which is
     // needed by the compaction for filling holes in the dense prefix.
     adjust_roots();

     compaction_start.update();
-    // Does the perm gen always have to be done serially because
-    // klasses are used in the update of an object?
-    compact_perm(vmthread_cm);
-
     compact();

     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
     // done before resizing.
     post_compact();
2116 gclog_or_tty->print("AdaptiveSizeStart: "); 2093 gclog_or_tty->print("AdaptiveSizeStart: ");
2117 gclog_or_tty->stamp(); 2094 gclog_or_tty->stamp();
2118 gclog_or_tty->print_cr(" collection: %d ", 2095 gclog_or_tty->print_cr(" collection: %d ",
2119 heap->total_collections()); 2096 heap->total_collections());
2120 if (Verbose) { 2097 if (Verbose) {
2121 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d" 2098 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
2122 " perm_gen_capacity: %d ", 2099 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
2123 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
2124 perm_gen->capacity_in_bytes());
2125 } 2100 }
2126 } 2101 }
2127 2102
2128 // Don't check if the size_policy is ready here. Let 2103 // Don't check if the size_policy is ready here. Let
2129 // the size_policy check that internally. 2104 // the size_policy check that internally.
@@ -2140,11 +2115,10 @@
         young_gen->to_space()->capacity_in_bytes();
       size_policy->compute_generation_free_space(
         young_gen->used_in_bytes(),
         young_gen->eden_space()->used_in_bytes(),
         old_gen->used_in_bytes(),
-        perm_gen->used_in_bytes(),
         young_gen->eden_space()->capacity_in_bytes(),
         old_gen->max_gen_size(),
         max_eden_size,
         true /* full gc*/,
         gc_cause,
@@ -2173,23 +2147,22 @@
       counters->update_young_capacity(young_gen->capacity_in_bytes());
     }

     heap->resize_all_tlabs();

-    // We collected the perm gen, so we'll resize it here.
-    perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
+    // Resize the metaspace capacity after a collection.
+    MetaspaceGC::compute_new_size();

     if (TraceGen1Time) accumulated_time()->stop();

     if (PrintGC) {
       if (PrintGCDetails) {
         // No GC timestamp here.  This is after GC so it would be confusing.
         young_gen->print_used_change(pre_gc_values.young_gen_used());
         old_gen->print_used_change(pre_gc_values.old_gen_used());
         heap->print_heap_change(pre_gc_values.heap_used());
-        // Print perm gen last (print_heap_change() excludes the perm gen).
-        perm_gen->print_used_change(pre_gc_values.perm_gen_used());
+        MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
       } else {
         heap->print_heap_change(pre_gc_values.heap_used());
       }
     }

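MetaspaceGC::compute_new_size() takes over the role perm_gen->compute_new_size() played: recompute a capacity target from post-GC usage. The general shape of such a policy, keeping committed capacity inside a free-ratio band around live metadata (the ratios and the clamp below are illustrative, not the exact HotSpot algorithm):

    #include <algorithm>
    #include <cstddef>

    size_t compute_new_capacity(size_t current_capacity, size_t used_after_gc) {
      const size_t min_free_pct = 40;  // grow when free space drops below this
      const size_t max_free_pct = 70;  // shrink when free space exceeds this
      const size_t min_cap = used_after_gc * 100 / (100 - min_free_pct);
      const size_t max_cap = used_after_gc * 100 / (100 - max_free_pct);
      // Clamp the current capacity into [min_cap, max_cap]; a caller would
      // then commit or uncommit metaspace to match the returned target.
      return std::min(std::max(current_capacity, min_cap), max_cap);
    }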
@@ -2203,11 +2176,10 @@
   for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
     ParCompactionManager* const cm =
       ParCompactionManager::manager_array(int(i));
     assert(cm->marking_stack()->is_empty(), "should be empty");
     assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
-    assert(cm->revisit_klass_stack()->is_empty(), "should be empty");
   }
 #endif // ASSERT

   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
@@ -2217,16 +2189,14 @@

   // Re-verify object start arrays
   if (VerifyObjectStartArray &&
       VerifyAfterGC) {
     old_gen->verify_object_start_array();
-    perm_gen->verify_object_start_array();
   }

   if (ZapUnusedHeapArea) {
     old_gen->object_space()->check_mangled_unused_area_complete();
-    perm_gen->object_space()->check_mangled_unused_area_complete();
   }

   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

   collection_exit.update();
@@ -2354,10 +2324,13 @@
   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
   ParallelTaskTerminator terminator(active_gc_threads, qset);

   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
+
+  // Need new claim bits before marking starts.
+  ClassLoaderDataGraph::clear_claimed_marks();

   {
     TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
     ParallelScavengeHeap::ParStrongRootsScope psrs;

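clear_claimed_marks() resets a per-loader claim flag so that, in the parallel phase that follows, each class loader's data is processed by exactly one worker. The usual CAS claim idiom looks roughly like this (illustrative sketch, not the HotSpot ClassLoaderData code):

    #include <atomic>

    struct ClassLoaderData {
      std::atomic<int> claimed{0};

      void clear_claimed() { claimed.store(0, std::memory_order_relaxed); }

      // Returns true for exactly one worker per marking round.
      bool try_claim() {
        int expected = 0;
        return claimed.compare_exchange_strong(expected, 1);
      }
    };

    void worker_visit(ClassLoaderData* cld) {
      if (!cld->try_claim()) return;  // another GC worker got here first
      // ... mark or adjust the oops and klasses owned by this loader ...
    }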
@@ -2405,22 +2378,51 @@
   CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
                           purged_class);
   cm->follow_marking_stacks(); // Flush marking stack.

   // Update subklass/sibling/implementor links of live klasses
-  // revisit_klass_stack is used in follow_weak_klass_links().
-  follow_weak_klass_links();
-
-  // Revisit memoized MDO's and clear any unmarked weak refs
-  follow_mdo_weak_refs();
+  Klass::clean_weak_klass_links(is_alive_closure());

   // Visit interned string tables and delete unmarked oops
   StringTable::unlink(is_alive_closure());
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();

   assert(cm->marking_stacks_empty(), "marking stacks should be empty");
+}
+
+void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
+  ClassLoaderData* cld = klass->class_loader_data();
+  assert(cld->has_defined(klass), "inconsistency!");
+
+  // The actual processing of the klass is done when we
+  // traverse the list of Klasses in the class loader data.
+  PSParallelCompact::follow_class_loader(cm, cld);
+}
+
+void PSParallelCompact::adjust_klass(ParCompactionManager* cm, Klass* klass) {
+  ClassLoaderData* cld = klass->class_loader_data();
+  assert(cld->has_defined(klass), "inconsistency!");
+
+  // The actual processing of the klass is done when we
+  // traverse the list of Klasses in the class loader data.
+  PSParallelCompact::adjust_class_loader(cm, cld);
+}
+
+void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
+                                            ClassLoaderData* cld) {
+  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+  PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
+
+  cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
+}
+
+void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
+                                            ClassLoaderData* cld) {
+  cld->oops_do(PSParallelCompact::adjust_root_pointer_closure(),
+               PSParallelCompact::adjust_klass_closure(),
+               true);
 }

 // This should be moved to the shared markSweep code!
 class PSAlwaysTrueClosure: public BoolObjectClosure {
  public:
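Note how follow_klass()/adjust_klass() in the hunk above deliberately do nothing klass-local: both bounce to the ClassLoaderData, and a single cld->oops_do() pass then feeds the loader's oop handles to an oop closure and its klasses to a klass closure. A sketch of that traversal shape (illustrative types, not the HotSpot ClassLoaderData API):

    #include <vector>

    struct Oop {};
    struct Klass;
    struct OopClosure   { virtual void do_oop(Oop** p) = 0;    virtual ~OopClosure() {} };
    struct KlassClosure { virtual void do_klass(Klass* k) = 0; virtual ~KlassClosure() {} };

    struct ClassLoaderData {
      std::vector<Oop*>   handles;  // roots held by this loader
      std::vector<Klass*> klasses;  // every klass this loader defined

      // One pass handles both kinds of state the loader owns.
      void oops_do(OopClosure* strong, KlassClosure* klass_cl, bool /*must_claim*/) {
        for (Oop*& h : handles)  strong->do_oop(&h);
        for (Klass* k : klasses) klass_cl->do_klass(k);
      }
    };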
@@ -2430,10 +2432,13 @@
 static PSAlwaysTrueClosure always_true;

 void PSParallelCompact::adjust_roots() {
   // Adjust the pointers to reflect the new locations
   TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
+
+  // Need new claim bits when tracing through and adjusting pointers.
+  ClassLoaderDataGraph::clear_claimed_marks();

   // General strong roots.
   Universe::oops_do(adjust_root_pointer_closure());
   JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
   Threads::oops_do(adjust_root_pointer_closure(), NULL);
@@ -2441,10 +2446,11 @@
   FlatProfiler::oops_do(adjust_root_pointer_closure());
   Management::oops_do(adjust_root_pointer_closure());
   JvmtiExport::oops_do(adjust_root_pointer_closure());
   // SO_AllClasses
   SystemDictionary::oops_do(adjust_root_pointer_closure());
+  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);

   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
@@ -2456,18 +2462,10 @@
   // may have been scanned.  Process them also.
   // Should the reference processor have a span that excludes
   // young gen objects?
   PSScavenge::reference_processor()->weak_oops_do(
     adjust_root_pointer_closure());
-}
-
-void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
-  TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
-  // trace("4");
-
-  gc_heap()->perm_gen()->start_array()->reset();
-  move_and_update(cm, perm_space_id);
 }

 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                       uint parallel_gc_threads)
 {
@@ -2499,21 +2497,22 @@
   size_t fillable_regions = 0;   // A count for diagnostic purposes.
   // A region index which corresponds to the tasks created above.
   // "which" must be 0 <= which < task_count

   which = 0;
-  for (unsigned int id = to_space_id; id > perm_space_id; --id) {
+  // id + 1 is used to test termination so unsigned can
+  // be used with an old_space_id == 0.
+  for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
     SpaceInfo* const space_info = _space_info + id;
     MutableSpace* const space = space_info->space();
     HeapWord* const new_top = space_info->new_top();

     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
     const size_t end_region =
       sd.addr_to_region_idx(sd.region_align_up(new_top));
-    assert(end_region > 0, "perm gen cannot be empty");

-    for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
+    for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
       if (sd.region(cur)->claim_unsafe()) {
         ParCompactionManager::region_list_push(which, cur);

         if (TraceParallelOldGCCompactionPhase && Verbose) {
           const size_t count_mod_8 = fillable_regions & 7;
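The two "+ 1" rewrites above exist because the indices are unsigned. With old_space_id now 0, the loops must include index 0, but an unsigned counter can never go negative: "cur >= beg_region" is always true when beg_region is 0, so the naive condition never terminates. Writing "cur + 1 > beg_region" visits 0 and then stops once the counter wraps. A standalone demonstration:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t beg = 0, end = 4;
      // Broken form: for (size_t cur = end - 1; cur >= beg; --cur) never
      // exits when beg == 0, since every size_t value satisfies cur >= 0.
      for (size_t cur = end - 1; cur + 1 > beg; --cur) {
        std::printf("visiting region %zu\n", cur);  // 3, 2, 1, 0, then stop
      }
      return 0;
    }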
@@ -2660,12 +2659,10 @@

   gc_task_manager()->execute_and_wait(q);

 #ifdef ASSERT
   // Verify that all regions have been processed before the deferred updates.
-  // Note that perm_space_id is skipped; this type of verification is not
-  // valid until the perm gen is compacted by regions.
   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     verify_complete(SpaceId(id));
   }
 #endif
 }
@@ -2720,70 +2717,10 @@
     print_region_ranges();
   }
 }
 #endif // #ifdef ASSERT

-void
-PSParallelCompact::follow_weak_klass_links() {
-  // All klasses on the revisit stack are marked at this point.
-  // Update and follow all subklass, sibling and implementor links.
-  // Check all the stacks here even if not all the workers are active.
-  // There is no accounting which indicates which stacks might have
-  // contents to be followed.
-  if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d",
-                           SystemDictionary::number_of_classes());
-  }
-  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
-    ParCompactionManager* cm = ParCompactionManager::manager_array(i);
-    KeepAliveClosure keep_alive_closure(cm);
-    Stack<Klass*, mtGC>* const rks = cm->revisit_klass_stack();
-    if (PrintRevisitStats) {
-      gclog_or_tty->print_cr("Revisit klass stack[%u] length = " SIZE_FORMAT,
-                             i, rks->size());
-    }
-    while (!rks->is_empty()) {
-      Klass* const k = rks->pop();
-      k->follow_weak_klass_links(is_alive_closure(), &keep_alive_closure);
-    }
-
-    cm->follow_marking_stacks();
-  }
-}
-
-void
-PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
-  cm->revisit_klass_stack()->push(k);
-}
-
-void PSParallelCompact::revisit_mdo(ParCompactionManager* cm, DataLayout* p) {
-  cm->revisit_mdo_stack()->push(p);
-}
-
-void PSParallelCompact::follow_mdo_weak_refs() {
-  // All strongly reachable oops have been marked at this point;
-  // we can visit and clear any weak references from MDO's which
-  // we memoized during the strong marking phase.
-  if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d",
-                           SystemDictionary::number_of_classes());
-  }
-  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
-    ParCompactionManager* cm = ParCompactionManager::manager_array(i);
-    Stack<DataLayout*, mtGC>* rms = cm->revisit_mdo_stack();
-    if (PrintRevisitStats) {
-      gclog_or_tty->print_cr("Revisit MDO stack[%u] size = " SIZE_FORMAT,
-                             i, rms->size());
-    }
-    while (!rms->is_empty()) {
-      rms->pop()->follow_weak_refs(is_alive_closure());
-    }
-
-    cm->follow_marking_stacks();
-  }
-}
-

 #ifdef VALIDATE_MARK_SWEEP

 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
   if (!ValidateMarkSweep)
@@ -2827,11 +2764,11 @@
   if (ValidateMarkSweep) {
     _adjusted_pointers->clear();
     _pointer_tracking = true;

     AdjusterTracker checker;
-    obj->oop_iterate(&checker);
+    obj->oop_iterate_no_header(&checker);
   }
 }


 void PSParallelCompact::check_interior_pointers() {
@@ -2840,14 +2777,14 @@
     guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
   }
 }


-void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
+void PSParallelCompact::reset_live_oop_tracking() {
   if (ValidateMarkSweep) {
     guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
-    _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
+    _live_oops_index = 0;
   }
 }


 void PSParallelCompact::register_live_oop(oop p, size_t size) {
@@ -2993,11 +2930,11 @@
 // heap, last_space_id is returned.  In debug mode it expects the address to be
 // in the heap and asserts such.
 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
   assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");

-  for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
+  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     if (_space_info[id].space()->contains(addr)) {
       return SpaceId(id);
     }
   }

@@ -3481,14 +3418,5 @@
 ParMarkBitMapClosure::IterationStatus
 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
   do_addr(addr);
   return ParMarkBitMap::incomplete;
 }
-
-// Prepare for compaction.  This method is executed once
-// (i.e., by a single thread) before compaction.
-// Save the updated location of the intArrayKlassObj for
-// filling holes in the dense prefix.
-void PSParallelCompact::compact_prologue() {
-  _updated_int_array_klass_obj = (klassOop)
-    summary_data().calc_new_pointer(Universe::intArrayKlassObj());
-}