Mercurial > hg > graal-compiler
comparison src/share/vm/gc_implementation/g1/g1OopClosures.hpp @ 845:df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
Summary: Modifications to G1 so as to allow the use of compressed oops.
Reviewed-by: apetrusenko, coleenp, jmasa, kvn, never, phh, tonyp
author:   ysr
date:     Tue, 14 Jul 2009 15:40:39 -0700
parents:  0fbdb4381b99
children: 0414c1049f15
comparison
equal
deleted
inserted
replaced
old revision: 839:bb18957ad21e | new revision: 845:df6caf649ff7
40 HeapRegion* _from; | 40 HeapRegion* _from; |
41 public: | 41 public: |
42 virtual void set_region(HeapRegion* from) { _from = from; } | 42 virtual void set_region(HeapRegion* from) { _from = from; } |
43 }; | 43 }; |
44 | 44 |
45 | |
46 class G1ScanAndBalanceClosure : public OopClosure { | |
47 G1CollectedHeap* _g1; | |
48 static int _nq; | |
49 public: | |
50 G1ScanAndBalanceClosure(G1CollectedHeap* g1) : _g1(g1) { } | |
51 inline void do_oop_nv(oop* p); | |
52 inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } | |
53 virtual void do_oop(oop* p); | |
54 virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); } | |
55 }; | |
56 | |
57 class G1ParClosureSuper : public OopsInHeapRegionClosure { | 45 class G1ParClosureSuper : public OopsInHeapRegionClosure { |
58 protected: | 46 protected: |
59 G1CollectedHeap* _g1; | 47 G1CollectedHeap* _g1; |
60 G1RemSet* _g1_rem; | 48 G1RemSet* _g1_rem; |
61 ConcurrentMark* _cm; | 49 ConcurrentMark* _cm; |
67 | 55 |
68 class G1ParScanClosure : public G1ParClosureSuper { | 56 class G1ParScanClosure : public G1ParClosureSuper { |
69 public: | 57 public: |
70 G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : | 58 G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
71 G1ParClosureSuper(g1, par_scan_state) { } | 59 G1ParClosureSuper(g1, par_scan_state) { } |
72 void do_oop_nv(oop* p); // should be made inline | 60 template <class T> void do_oop_nv(T* p); |
73 inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } | |
74 virtual void do_oop(oop* p) { do_oop_nv(p); } | 61 virtual void do_oop(oop* p) { do_oop_nv(p); } |
75 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } | 62 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } |
76 }; | 63 }; |
77 | 64 |
78 #define G1_PARTIAL_ARRAY_MASK 1 | 65 #define G1_PARTIAL_ARRAY_MASK 0x2 |
79 | 66 |
80 inline bool has_partial_array_mask(oop* ref) { | 67 template <class T> inline bool has_partial_array_mask(T* ref) { |
81 return (intptr_t) ref & G1_PARTIAL_ARRAY_MASK; | 68 return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK; |
82 } | 69 } |
83 | 70 |
84 inline oop* set_partial_array_mask(oop obj) { | 71 template <class T> inline T* set_partial_array_mask(T obj) { |
85 return (oop*) ((intptr_t) obj | G1_PARTIAL_ARRAY_MASK); | 72 assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!"); |
73 return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK); | |
86 } | 74 } |
87 | 75 |
88 inline oop clear_partial_array_mask(oop* ref) { | 76 template <class T> inline oop clear_partial_array_mask(T* ref) { |
89 return oop((intptr_t) ref & ~G1_PARTIAL_ARRAY_MASK); | 77 return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK); |
90 } | 78 } |
91 | 79 |
92 class G1ParScanPartialArrayClosure : public G1ParClosureSuper { | 80 class G1ParScanPartialArrayClosure : public G1ParClosureSuper { |
93 G1ParScanClosure _scanner; | 81 G1ParScanClosure _scanner; |
94 template <class T> void process_array_chunk(oop obj, int start, int end); | |
95 public: | 82 public: |
96 G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : | 83 G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
97 G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { } | 84 G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { } |
98 void do_oop_nv(oop* p); | 85 template <class T> void do_oop_nv(T* p); |
99 void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } | |
100 virtual void do_oop(oop* p) { do_oop_nv(p); } | 86 virtual void do_oop(oop* p) { do_oop_nv(p); } |
101 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } | 87 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } |
102 }; | 88 }; |
103 | 89 |
104 | 90 |
105 class G1ParCopyHelper : public G1ParClosureSuper { | 91 class G1ParCopyHelper : public G1ParClosureSuper { |
106 G1ParScanClosure *_scanner; | 92 G1ParScanClosure *_scanner; |
107 protected: | 93 protected: |
108 void mark_forwardee(oop* p); | 94 template <class T> void mark_forwardee(T* p); |
109 oop copy_to_survivor_space(oop obj); | 95 oop copy_to_survivor_space(oop obj); |
110 public: | 96 public: |
111 G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, | 97 G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, |
112 G1ParScanClosure *scanner) : | 98 G1ParScanClosure *scanner) : |
113 G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { } | 99 G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { } |
115 | 101 |
116 template<bool do_gen_barrier, G1Barrier barrier, | 102 template<bool do_gen_barrier, G1Barrier barrier, |
117 bool do_mark_forwardee, bool skip_cset_test> | 103 bool do_mark_forwardee, bool skip_cset_test> |
118 class G1ParCopyClosure : public G1ParCopyHelper { | 104 class G1ParCopyClosure : public G1ParCopyHelper { |
119 G1ParScanClosure _scanner; | 105 G1ParScanClosure _scanner; |
120 void do_oop_work(oop* p); | 106 template <class T> void do_oop_work(T* p); |
121 void do_oop_work(narrowOop* p) { guarantee(false, "NYI"); } | |
122 public: | 107 public: |
123 G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : | 108 G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
124 _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { } | 109 _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { } |
125 inline void do_oop_nv(oop* p) { | 110 template <class T> void do_oop_nv(T* p) { |
126 do_oop_work(p); | 111 do_oop_work(p); |
127 if (do_mark_forwardee) | 112 if (do_mark_forwardee) |
128 mark_forwardee(p); | 113 mark_forwardee(p); |
129 } | 114 } |
130 inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } | |
131 virtual void do_oop(oop* p) { do_oop_nv(p); } | 115 virtual void do_oop(oop* p) { do_oop_nv(p); } |
132 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } | 116 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } |
133 }; | 117 }; |
134 | 118 |
135 typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure; | 119 typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure; |
136 typedef G1ParCopyClosure<true, G1BarrierNone, false, false> G1ParScanPermClosure; | 120 typedef G1ParCopyClosure<true, G1BarrierNone, false, false> G1ParScanPermClosure; |
121 typedef G1ParCopyClosure<false, G1BarrierRS, false, false> G1ParScanHeapRSClosure; | |
137 typedef G1ParCopyClosure<false, G1BarrierNone, true, false> G1ParScanAndMarkExtRootClosure; | 122 typedef G1ParCopyClosure<false, G1BarrierNone, true, false> G1ParScanAndMarkExtRootClosure; |
138 typedef G1ParCopyClosure<true, G1BarrierNone, true, false> G1ParScanAndMarkPermClosure; | 123 typedef G1ParCopyClosure<true, G1BarrierNone, true, false> G1ParScanAndMarkPermClosure; |
139 typedef G1ParCopyClosure<false, G1BarrierRS, false, false> G1ParScanHeapRSClosure; | |
140 typedef G1ParCopyClosure<false, G1BarrierRS, true, false> G1ParScanAndMarkHeapRSClosure; | 124 typedef G1ParCopyClosure<false, G1BarrierRS, true, false> G1ParScanAndMarkHeapRSClosure; |
141 // This is the only case when we set skip_cset_test. Basically, this | 125 // This is the only case when we set skip_cset_test. Basically, this |
142 // closure is (should?) only be called directly while we're draining | 126 // closure is (should?) only be called directly while we're draining |
143 // the overflow and task queues. In that case we know that the | 127 // the overflow and task queues. In that case we know that the |
144 // reference in question points into the collection set, otherwise we | 128 // reference in question points into the collection set, otherwise we |
145 // would not have pushed it on the queue. | 129 // would not have pushed it on the queue. The following is defined in |
146 typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure; | 130 // g1_specialized_oop_closures.hpp. |
131 // typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure; | |
147 // We need a separate closure to handle references during evacuation | 132 // We need a separate closure to handle references during evacuation |
148 // failure processing, as it cannot asume that the reference already | 133 // failure processing, as we cannot asume that the reference already |
149 // points to the collection set (like G1ParScanHeapEvacClosure does). | 134 // points into the collection set (like G1ParScanHeapEvacClosure does). |
150 typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure; | 135 typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure; |
151 | 136 |
152 class FilterIntoCSClosure: public OopClosure { | 137 class FilterIntoCSClosure: public OopClosure { |
153 G1CollectedHeap* _g1; | 138 G1CollectedHeap* _g1; |
154 OopClosure* _oc; | 139 OopClosure* _oc; |
156 public: | 141 public: |
157 FilterIntoCSClosure( DirtyCardToOopClosure* dcto_cl, | 142 FilterIntoCSClosure( DirtyCardToOopClosure* dcto_cl, |
158 G1CollectedHeap* g1, OopClosure* oc) : | 143 G1CollectedHeap* g1, OopClosure* oc) : |
159 _dcto_cl(dcto_cl), _g1(g1), _oc(oc) | 144 _dcto_cl(dcto_cl), _g1(g1), _oc(oc) |
160 {} | 145 {} |
161 inline void do_oop_nv(oop* p); | 146 template <class T> void do_oop_nv(T* p); |
162 inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } | 147 virtual void do_oop(oop* p) { do_oop_nv(p); } |
163 virtual void do_oop(oop* p); | 148 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } |
164 virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); } | |
165 bool apply_to_weak_ref_discovered_field() { return true; } | 149 bool apply_to_weak_ref_discovered_field() { return true; } |
166 bool do_header() { return false; } | 150 bool do_header() { return false; } |
167 }; | 151 }; |
168 | 152 |
169 class FilterInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure { | 153 class FilterInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure { |
172 public: | 156 public: |
173 FilterInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1, | 157 FilterInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1, |
174 OopsInHeapRegionClosure* oc) : | 158 OopsInHeapRegionClosure* oc) : |
175 _g1(g1), _oc(oc) | 159 _g1(g1), _oc(oc) |
176 {} | 160 {} |
177 inline void do_oop_nv(oop* p); | 161 template <class T> void do_oop_nv(T* p); |
178 inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } | 162 virtual void do_oop(oop* p) { do_oop_nv(p); } |
179 virtual void do_oop(oop* p); | 163 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } |
180 virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); } | |
181 bool apply_to_weak_ref_discovered_field() { return true; } | 164 bool apply_to_weak_ref_discovered_field() { return true; } |
182 bool do_header() { return false; } | 165 bool do_header() { return false; } |
183 void set_region(HeapRegion* from) { | 166 void set_region(HeapRegion* from) { |
184 _oc->set_region(from); | 167 _oc->set_region(from); |
185 } | 168 } |
193 FilterAndMarkInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1, | 176 FilterAndMarkInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1, |
194 OopsInHeapRegionClosure* oc, | 177 OopsInHeapRegionClosure* oc, |
195 ConcurrentMark* cm) | 178 ConcurrentMark* cm) |
196 : _g1(g1), _oc(oc), _cm(cm) { } | 179 : _g1(g1), _oc(oc), _cm(cm) { } |
197 | 180 |
198 inline void do_oop_nv(oop* p); | 181 template <class T> void do_oop_nv(T* p); |
199 inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } | 182 virtual void do_oop(oop* p) { do_oop_nv(p); } |
200 virtual void do_oop(oop* p); | 183 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } |
201 virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); } | |
202 bool apply_to_weak_ref_discovered_field() { return true; } | 184 bool apply_to_weak_ref_discovered_field() { return true; } |
203 bool do_header() { return false; } | 185 bool do_header() { return false; } |
204 void set_region(HeapRegion* from) { | 186 void set_region(HeapRegion* from) { |
205 _oc->set_region(from); | 187 _oc->set_region(from); |
206 } | 188 } |
211 HeapWord* _r_end; | 193 HeapWord* _r_end; |
212 OopClosure* _oc; | 194 OopClosure* _oc; |
213 int _out_of_region; | 195 int _out_of_region; |
214 public: | 196 public: |
215 FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc); | 197 FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc); |
216 inline void do_oop_nv(oop* p); | 198 template <class T> void do_oop_nv(T* p); |
217 inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } | 199 virtual void do_oop(oop* p) { do_oop_nv(p); } |
218 virtual void do_oop(oop* p); | 200 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } |
219 virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); } | |
220 bool apply_to_weak_ref_discovered_field() { return true; } | 201 bool apply_to_weak_ref_discovered_field() { return true; } |
221 bool do_header() { return false; } | 202 bool do_header() { return false; } |
222 int out_of_region() { return _out_of_region; } | 203 int out_of_region() { return _out_of_region; } |
223 }; | 204 }; |