Mercurial > hg > graal-compiler
comparison src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp @ 10405:f2110083203d
8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>
author | sla |
---|---|
date | Mon, 10 Jun 2013 11:30:51 +0200 |
parents | 001ec9515f84 |
children | 71180a6e5080 |
comparison
equal
deleted
inserted
replaced
10404:d0add7016434 | 10405:f2110083203d |
---|---|
23 */ | 23 */ |
24 | 24 |
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP | 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP |
26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP | 26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP |
27 | 27 |
28 #include "gc_implementation/shared/gcHeapSummary.hpp" | |
28 #include "gc_implementation/shared/gSpaceCounters.hpp" | 29 #include "gc_implementation/shared/gSpaceCounters.hpp" |
29 #include "gc_implementation/shared/gcStats.hpp" | 30 #include "gc_implementation/shared/gcStats.hpp" |
31 #include "gc_implementation/shared/gcWhen.hpp" | |
30 #include "gc_implementation/shared/generationCounters.hpp" | 32 #include "gc_implementation/shared/generationCounters.hpp" |
31 #include "memory/freeBlockDictionary.hpp" | 33 #include "memory/freeBlockDictionary.hpp" |
32 #include "memory/generation.hpp" | 34 #include "memory/generation.hpp" |
33 #include "runtime/mutexLocker.hpp" | 35 #include "runtime/mutexLocker.hpp" |
34 #include "runtime/virtualspace.hpp" | 36 #include "runtime/virtualspace.hpp" |
51 // means of a sliding mark-compact. | 53 // means of a sliding mark-compact. |
52 | 54 |
53 class CMSAdaptiveSizePolicy; | 55 class CMSAdaptiveSizePolicy; |
54 class CMSConcMarkingTask; | 56 class CMSConcMarkingTask; |
55 class CMSGCAdaptivePolicyCounters; | 57 class CMSGCAdaptivePolicyCounters; |
58 class CMSTracer; | |
59 class ConcurrentGCTimer; | |
56 class ConcurrentMarkSweepGeneration; | 60 class ConcurrentMarkSweepGeneration; |
57 class ConcurrentMarkSweepPolicy; | 61 class ConcurrentMarkSweepPolicy; |
58 class ConcurrentMarkSweepThread; | 62 class ConcurrentMarkSweepThread; |
59 class CompactibleFreeListSpace; | 63 class CompactibleFreeListSpace; |
60 class FreeChunk; | 64 class FreeChunk; |
61 class PromotionInfo; | 65 class PromotionInfo; |
62 class ScanMarkedObjectsAgainCarefullyClosure; | 66 class ScanMarkedObjectsAgainCarefullyClosure; |
63 class TenuredGeneration; | 67 class TenuredGeneration; |
68 class SerialOldTracer; | |
64 | 69 |
65 // A generic CMS bit map. It's the basis for both the CMS marking bit map | 70 // A generic CMS bit map. It's the basis for both the CMS marking bit map |
66 // as well as for the mod union table (in each case only a subset of the | 71 // as well as for the mod union table (in each case only a subset of the |
67 // methods are used). This is essentially a wrapper around the BitMap class, | 72 // methods are used). This is essentially a wrapper around the BitMap class, |
68 // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map, | 73 // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map, |
565 | 570 |
566 // Initialization Errors | 571 // Initialization Errors |
567 bool _completed_initialization; | 572 bool _completed_initialization; |
568 | 573 |
569 // In support of ExplicitGCInvokesConcurrent | 574 // In support of ExplicitGCInvokesConcurrent |
570 static bool _full_gc_requested; | 575 static bool _full_gc_requested; |
571 unsigned int _collection_count_start; | 576 static GCCause::Cause _full_gc_cause; |
577 unsigned int _collection_count_start; | |
572 | 578 |
573 // Should we unload classes this concurrent cycle? | 579 // Should we unload classes this concurrent cycle? |
574 bool _should_unload_classes; | 580 bool _should_unload_classes; |
575 unsigned int _concurrent_cycles_since_last_unload; | 581 unsigned int _concurrent_cycles_since_last_unload; |
576 unsigned int concurrent_cycles_since_last_unload() const { | 582 unsigned int concurrent_cycles_since_last_unload() const { |
606 elapsedTimer _inter_sweep_timer; // time between sweeps | 612 elapsedTimer _inter_sweep_timer; // time between sweeps |
607 elapsedTimer _intra_sweep_timer; // time _in_ sweeps | 613 elapsedTimer _intra_sweep_timer; // time _in_ sweeps |
608 // padded decaying average estimates of the above | 614 // padded decaying average estimates of the above |
609 AdaptivePaddedAverage _inter_sweep_estimate; | 615 AdaptivePaddedAverage _inter_sweep_estimate; |
610 AdaptivePaddedAverage _intra_sweep_estimate; | 616 AdaptivePaddedAverage _intra_sweep_estimate; |
617 | |
618 CMSTracer* _gc_tracer_cm; | |
619 ConcurrentGCTimer* _gc_timer_cm; | |
620 | |
621 bool _cms_start_registered; | |
622 | |
623 GCHeapSummary _last_heap_summary; | |
624 MetaspaceSummary _last_metaspace_summary; | |
625 | |
626 void register_foreground_gc_start(GCCause::Cause cause); | |
627 void register_gc_start(GCCause::Cause cause); | |
628 void register_gc_end(); | |
629 void save_heap_summary(); | |
630 void report_heap_summary(GCWhen::Type when); | |
611 | 631 |
612 protected: | 632 protected: |
613 ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS) | 633 ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS) |
614 MemRegion _span; // span covering above two | 634 MemRegion _span; // span covering above two |
615 CardTableRS* _ct; // card table | 635 CardTableRS* _ct; // card table |
825 // a mark-sweep, after taking over from a possibly on-going | 845 // a mark-sweep, after taking over from a possibly on-going |
826 // concurrent mark-sweep collection. | 846 // concurrent mark-sweep collection. |
827 void do_mark_sweep_work(bool clear_all_soft_refs, | 847 void do_mark_sweep_work(bool clear_all_soft_refs, |
828 CollectorState first_state, bool should_start_over); | 848 CollectorState first_state, bool should_start_over); |
829 | 849 |
850 // Work methods for reporting concurrent mode interruption or failure | |
851 bool is_external_interruption(); | |
852 void report_concurrent_mode_interruption(); | |
853 | |
830 // If the background GC is active, acquire control from the background | 854 // If the background GC is active, acquire control from the background |
831 // GC and do the collection. | 855 // GC and do the collection. |
832 void acquire_control_and_collect(bool full, bool clear_all_soft_refs); | 856 void acquire_control_and_collect(bool full, bool clear_all_soft_refs); |
833 | 857 |
834 // For synchronizing passing of control from background to foreground | 858 // For synchronizing passing of control from background to foreground |
874 | 898 |
875 void collect(bool full, | 899 void collect(bool full, |
876 bool clear_all_soft_refs, | 900 bool clear_all_soft_refs, |
877 size_t size, | 901 size_t size, |
878 bool tlab); | 902 bool tlab); |
879 void collect_in_background(bool clear_all_soft_refs); | 903 void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause); |
880 void collect_in_foreground(bool clear_all_soft_refs); | 904 void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause); |
881 | 905 |
882 // In support of ExplicitGCInvokesConcurrent | 906 // In support of ExplicitGCInvokesConcurrent |
883 static void request_full_gc(unsigned int full_gc_count); | 907 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause); |
884 // Should we unload classes in a particular concurrent cycle? | 908 // Should we unload classes in a particular concurrent cycle? |
885 bool should_unload_classes() const { | 909 bool should_unload_classes() const { |
886 return _should_unload_classes; | 910 return _should_unload_classes; |
887 } | 911 } |
888 void update_should_unload_classes(); | 912 void update_should_unload_classes(); |