comparison src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp @ 20404:227a9e5e4b4a
8057536: Refactor G1 to allow context specific allocations
Summary: Splitting out a G1 allocator class to simplify specialized allocators which can associate each allocation with a given context.
Reviewed-by: mgerdin, brutisso
| author | sjohanss |
|---|---|
| date | Fri, 05 Sep 2014 09:49:19 +0200 |
| parents | a2328cbebb23 |
| children | 6948da6d7c13 |
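To make the intent of the split concrete before reading the diff, here is a minimal sketch, under assumed names (`AllocContextSketch`, `G1ParGCAllocatorSketch`, `allocate()`, `retire_alloc_buffers()` are illustrative, not the actual HotSpot interfaces), of how moving PLAB ownership out of the per-thread scan state lets a subclassed allocator key each allocation on a context:

```cpp
// Illustrative sketch only: type and member names are assumptions, not the
// real classes introduced by this changeset.
#include <cstddef>

typedef unsigned char AllocContextSketch;   // hypothetical per-allocation context tag

enum GCAllocPurposeSketch { ForSurvived, ForTenured, PurposeCount };

// The extracted allocator owns the per-purpose buffers; subclasses can route
// each request to a buffer chosen by (purpose, context).
class G1ParGCAllocatorSketch {
public:
  virtual ~G1ParGCAllocatorSketch() {}
  virtual void* allocate(GCAllocPurposeSketch purpose, size_t word_sz,
                         AllocContextSketch ctx) = 0;
  virtual void  undo_allocation(GCAllocPurposeSketch purpose, void* obj,
                                size_t word_sz, AllocContextSketch ctx) = 0;
  virtual void  retire_alloc_buffers() = 0;
};

// The per-thread scan state keeps only a pointer to the allocator and
// forwards allocation requests instead of holding PLABs itself, which is
// what the removal of the survivor/tenured buffers and the new
// _g1_par_allocator field in the diff below correspond to.
class G1ParScanThreadStateSketch {
  G1ParGCAllocatorSketch* _g1_par_allocator;
public:
  explicit G1ParScanThreadStateSketch(G1ParGCAllocatorSketch* allocator)
      : _g1_par_allocator(allocator) {}

  void* allocate_copy_dest(GCAllocPurposeSketch purpose, size_t word_sz,
                           AllocContextSketch ctx) {
    return _g1_par_allocator->allocate(purpose, word_sz, ctx);
  }
};
```

The point of this shape is that allocator variants (for example, one keeping separate buffers per context) can be swapped in without touching the scan-state or copying code.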
--- src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp  (20403:8ec8971f511a)
+++ src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp  (20404:227a9e5e4b4a)
@@ -44,13 +44,12 @@
   RefToScanQueue* _refs;
   DirtyCardQueue _dcq;
   G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocBuffer _surviving_alloc_buffer;
-  G1ParGCAllocBuffer _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  G1ParGCAllocator* _g1_par_allocator;
+
   ageTable _age_table;
 
   G1ParScanClosure _scanner;
 
   size_t _alloc_buffer_waste;
@@ -76,11 +75,10 @@
   size_t* _surviving_young_words;
 
 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
 
   void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-
   void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
 
   DirtyCardQueue& dirty_card_queue() { return _dcq; }
   G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
 
@@ -102,17 +100,10 @@
   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
   ~G1ParScanThreadState();
 
   ageTable* age_table() { return &_age_table; }
 
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return _alloc_buffers[purpose];
-  }
-
-  size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
-  size_t undo_waste() const { return _undo_waste; }
-
 #ifdef ASSERT
   bool queue_is_empty() const { return _refs->is_empty(); }
 
   bool verify_ref(narrowOop* ref) const;
   bool verify_ref(oop* ref) const;
@@ -124,16 +115,10 @@
     _refs->push(ref);
   }
 
   template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
 
- private:
-
-  inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
-  inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
-  inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
-
  public:
 
   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
     _evac_failure_cl = evac_failure_cl;
   }
@@ -175,12 +160,10 @@
     // age -1 regions (i.e. non-young ones)
     return _surviving_young_words;
   }
 
  private:
-  void retire_alloc_buffers();
-
 #define G1_PARTIAL_ARRAY_MASK 0x2
 
   inline bool has_partial_array_mask(oop* ref) const {
     return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
   }
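The `G1_PARTIAL_ARRAY_MASK` code retained at the end of the class is an instance of low-bit pointer tagging: object addresses are at least word aligned, so bit 1 is free to flag task-queue entries that describe partially scanned object arrays. A minimal sketch of the technique follows; only `has_partial_array_mask` and the `0x2` mask appear in the excerpt above, while the set/clear helpers are assumed for illustration.

```cpp
// Sketch of the low-bit pointer tagging behind G1_PARTIAL_ARRAY_MASK.
// Only has_partial_array_mask and the 0x2 mask come from the excerpt; the
// set/clear helpers are illustrative assumptions.
#include <cassert>
#include <cstdint>

#define G1_PARTIAL_ARRAY_MASK 0x2

// True if the queue entry is a tagged "partial array" marker rather than a
// plain reference location.
inline bool has_partial_array_mask(void** ref) {
  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}

// Tag an (at least 4-byte aligned) object address so it can be pushed on the
// same queue as ordinary references.
inline void** set_partial_array_mask(void* obj) {
  assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "address already tagged");
  return (void**)((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
}

// Recover the untagged object address when the entry is popped.
inline void* clear_partial_array_mask(void** ref) {
  return (void*)((uintptr_t)ref & ~(uintptr_t)G1_PARTIAL_ARRAY_MASK);
}
```

Tagging the pointer itself keeps each partial-array work item the size of a single queue slot and avoids introducing a second queue type.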