comparison src/share/vm/memory/defNewGeneration.hpp @ 113:ba764ed4b6f2

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers.
Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
author coleenp
date Sun, 13 Apr 2008 17:43:42 -0400
parents a61af66fc99e
children d1605aabd0a1 12eea04c8b06
comparison of 110:a49a647afe9a with 113:ba764ed4b6f2

--- a/src/share/vm/memory/defNewGeneration.hpp    110:a49a647afe9a
+++ b/src/share/vm/memory/defNewGeneration.hpp    113:ba764ed4b6f2
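The 32gb figure in the summary follows directly from the encoding arithmetic: a compressed oop is a 32-bit offset from a common heap base, scaled by the 8-byte object alignment, so it can address 2^32 * 8 bytes, i.e. 32 GB. The sketch below is illustrative only and is not HotSpot code; the names sketch::heap_base, encode and decode, the fixed 3-bit shift, and the tiny demo heap are all assumptions made for the example.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative sketch only, not HotSpot's implementation.
// Assumption: objects are 8-byte aligned and the heap spans at most
// 2^32 * 8 bytes (32 GB), so every object address fits in a shifted
// 32-bit offset from a single base pointer.
namespace sketch {
  const unsigned kShift = 3;          // log2 of the 8-byte alignment
  char* heap_base = 0;                // hypothetical heap base address

  typedef uint32_t narrow_ref;        // stand-in for narrowOop

  inline narrow_ref encode(void* p) {
    uintptr_t offset = (char*)p - heap_base;         // byte offset into heap
    assert((offset & 7u) == 0 && "objects must be 8-byte aligned");
    assert((offset >> kShift) <= UINT32_MAX && "heap exceeds 32 GB");
    return (narrow_ref)(offset >> kShift);           // fits in 32 bits
  }

  inline void* decode(narrow_ref r) {
    return heap_base + ((uintptr_t)r << kShift);     // widen back to a pointer
  }
}

int main() {
  char fake_heap[64];                        // stand-in for the managed heap
  sketch::heap_base = fake_heap;
  void* obj = fake_heap + 32;                // an "object" at byte offset 32
  sketch::narrow_ref r = sketch::encode(obj);
  assert(sketch::decode(r) == obj);          // round-trips through 32 bits
  printf("maximum heap covered: %llu bytes\n",
         (unsigned long long)UINT32_MAX << sketch::kShift);
  return 0;
}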
@@ -22,10 +22,11 @@
  *
  */
 
 class EdenSpace;
 class ContiguousSpace;
+class ScanClosure;
 
 // DefNewGeneration is a young generation containing eden, from- and
 // to-space.
 
 class DefNewGeneration: public Generation {
@@ -153,21 +154,25 @@
 
   class KeepAliveClosure: public OopClosure {
   protected:
     ScanWeakRefClosure* _cl;
     CardTableRS* _rs;
+    template <class T> void do_oop_work(T* p);
   public:
     KeepAliveClosure(ScanWeakRefClosure* cl);
-    void do_oop(oop* p);
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
   };
 
   class FastKeepAliveClosure: public KeepAliveClosure {
   protected:
     HeapWord* _boundary;
+    template <class T> void do_oop_work(T* p);
   public:
     FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
-    void do_oop(oop* p);
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
   };
 
   class EvacuateFollowersClosure: public VoidClosure {
     GenCollectedHeap* _gch;
     int _level;
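Every closure touched in the hunk above changes in the same way: the scanning logic moves into a protected member template do_oop_work(T* p), and the two virtual do_oop overloads (full-width oop* and compressed narrowOop*) merely forward to it, so the logic is written once for both reference widths. Below is a minimal sketch of that shape under simplified assumptions: RefClosure, CountingClosure, wide_ref and narrow_ref are made-up stand-ins for OopClosure, a concrete closure, oop and narrowOop, not the real HotSpot declarations.

#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef void*    wide_ref;      // stand-in for a full-width oop slot
typedef uint32_t narrow_ref;    // stand-in for a compressed narrowOop slot

class RefClosure {              // stand-in for OopClosure
 public:
  virtual void do_ref(wide_ref* p)   = 0;
  virtual void do_ref(narrow_ref* p) = 0;
  virtual ~RefClosure() {}
};

class CountingClosure: public RefClosure {
  size_t _count;
  // One template body carries the real work; it is instantiated for both
  // slot widths, so the logic is written and maintained only once.
  template <class T> void do_ref_work(T* p) {
    if (*p != 0) _count++;      // "visit" the slot, whatever its width
  }
 public:
  CountingClosure() : _count(0) {}
  // The virtual overloads only dispatch to the shared template body,
  // mirroring the do_oop / do_oop_work split this change introduces.
  virtual void do_ref(wide_ref* p)   { do_ref_work(p); }
  virtual void do_ref(narrow_ref* p) { do_ref_work(p); }
  size_t count() const { return _count; }
};

int main() {
  CountingClosure cl;
  int dummy = 0;
  wide_ref   w = &dummy;        // a non-null full-width slot
  narrow_ref n = 42;            // a non-null compressed slot
  cl.do_ref(&w);
  cl.do_ref(&n);
  printf("visited %zu non-null slots\n", cl.count());
  return 0;
}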
@@ -204,11 +209,11 @@
   // Accessing spaces
   EdenSpace* eden() const { return _eden_space; }
   ContiguousSpace* from() const { return _from_space; }
   ContiguousSpace* to() const { return _to_space; }
 
-  inline CompactibleSpace* first_compaction_space() const;
+  virtual CompactibleSpace* first_compaction_space() const;
 
   // Space enquiries
   size_t capacity() const;
   size_t used() const;
   size_t free() const;
@@ -224,12 +229,12 @@
   HeapWord** top_addr() const;
   HeapWord** end_addr() const;
 
   // Thread-local allocation buffers
   bool supports_tlab_allocation() const { return true; }
-  inline size_t tlab_capacity() const;
-  inline size_t unsafe_max_tlab_alloc() const;
+  size_t tlab_capacity() const;
+  size_t unsafe_max_tlab_alloc() const;
 
   // Grow the generation by the specified number of bytes.
   // The size of bytes is assumed to be properly aligned.
   // Return true if the expansion was successful.
   bool expand(size_t bytes);
@@ -263,17 +268,17 @@
       size_ok;
 
     return result;
   }
 
-  inline HeapWord* allocate(size_t word_size, bool is_tlab);
+  HeapWord* allocate(size_t word_size, bool is_tlab);
   HeapWord* allocate_from_space(size_t word_size);
 
-  inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
+  HeapWord* par_allocate(size_t word_size, bool is_tlab);
 
   // Prologue & Epilogue
-  inline virtual void gc_prologue(bool full);
+  virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
 
   // Doesn't require additional work during GC prologue and epilogue
   virtual bool performs_in_place_marking() const { return false; }
 
@@ -305,11 +310,11 @@
                                 bool is_tlab);
   HeapWord* expand_and_allocate(size_t size,
                                 bool is_tlab,
                                 bool parallel = false);
 
-  oop copy_to_survivor_space(oop old, oop* from);
+  oop copy_to_survivor_space(oop old);
   int tenuring_threshold() { return _tenuring_threshold; }
 
   // Performance Counter support
   void update_counters();
 