comparison src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp @ 113:ba764ed4b6f2

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
author coleenp
date Sun, 13 Apr 2008 17:43:42 -0400
parents a61af66fc99e
children d1605aabd0a1
comparing 110:a49a647afe9a with 113:ba764ed4b6f2
@@ -39,12 +39,10 @@
 // End move to some global location
 
 class MutableSpace;
 class PSOldGen;
 class ParCompactionManager;
-
-#define PS_CHUNKED_ARRAY_OOP_MASK 1
 
 #define PS_PM_STATS 0
 
 class PSPromotionManager : public CHeapObj {
   friend class PSScavenge;
@@ -78,11 +76,11 @@
   bool _young_gen_is_full;
   bool _old_gen_is_full;
   PrefetchQueue _prefetch_queue;
 
   OopStarTaskQueue _claimed_stack_depth;
-  GrowableArray<oop*>* _overflow_stack_depth;
+  GrowableArray<StarTask>* _overflow_stack_depth;
   OopTaskQueue _claimed_stack_breadth;
   GrowableArray<oop>* _overflow_stack_breadth;
 
   bool _depth_first;
   bool _totally_drain;
@@ -90,17 +88,19 @@
 
   uint _array_chunk_size;
   uint _min_array_size_for_chunking;
 
   // Accessors
   static PSOldGen* old_gen() { return _old_gen; }
   static MutableSpace* young_space() { return _young_space; }
 
   inline static PSPromotionManager* manager_array(int index);
+  template <class T> inline void claim_or_forward_internal_depth(T* p);
+  template <class T> inline void claim_or_forward_internal_breadth(T* p);
 
-  GrowableArray<oop*>* overflow_stack_depth() { return _overflow_stack_depth; }
+  GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
   GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
 
   // On the task queues we push reference locations as well as
   // partially-scanned arrays (in the latter case, we push an oop to
   // the from-space image of the array and the length on the
   // from-space image indicates how many entries on the array we still
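
The two changes in this hunk carry the core idea of the compressed-oops work as it touches this class: a reference slot may now be either a full-width oop* or a 32-bit narrowOop*, so the scanning entry points become templates over the slot type T, and the overflow stack holds StarTask elements that remember which width of slot they refer to. The standalone sketch below is not HotSpot code; SlotTask, FakeObject, FakeNarrowOop and visit_slot are invented names, and the only assumption is that slots are at least 4-byte aligned so a low tag bit is free. It mirrors just the is_narrow()/conversion-operator surface that unmask_chunked_array_oop() in the next hunk relies on.

#include <cassert>
#include <cstdint>
#include <cstdio>

struct FakeObject { int field; };   // stand-in for a heap object
typedef FakeObject* FakeOop;        // wide reference slot contents
typedef uint32_t    FakeNarrowOop;  // narrow (compressed) slot contents

// SlotTask is an invented stand-in for the role StarTask plays here: one
// queue element that can hold either a wide slot (FakeOop*) or a narrow
// slot (FakeNarrowOop*), using the low pointer bit as the "narrow" tag.
class SlotTask {
  void* _holder;                    // tagged address of the slot
  enum { NARROW_BIT = 1 };
 public:
  SlotTask() : _holder(0) {}
  explicit SlotTask(FakeOop* p) : _holder(p) {}
  explicit SlotTask(FakeNarrowOop* p)
    : _holder((void*)((uintptr_t)p | NARROW_BIT)) {}

  bool is_narrow() const { return ((uintptr_t)_holder & NARROW_BIT) != 0; }

  // Conversion operators in the spirit of the "(oop*)p" cast used by
  // unmask_chunked_array_oop(); callers check is_narrow() first.
  operator FakeOop*() const {
    assert(!is_narrow());
    return (FakeOop*)_holder;
  }
  operator FakeNarrowOop*() const {
    assert(is_narrow());
    return (FakeNarrowOop*)((uintptr_t)_holder & ~(uintptr_t)NARROW_BIT);
  }
};

// Scanning code is then written once as a template over the slot type,
// in the same spirit as claim_or_forward_internal_depth(T* p).
template <class T> void visit_slot(T* p) {
  printf("visiting a %u-byte slot at %p\n", (unsigned)sizeof(T), (void*)p);
}

int main() {
  FakeOop wide_slot = 0;            // an ordinary full-width slot
  FakeNarrowOop narrow_slot = 0;    // a compressed 32-bit slot
  SlotTask a(&wide_slot);
  SlotTask b(&narrow_slot);
  assert(!a.is_narrow() && b.is_narrow());
  visit_slot((FakeOop*)a);          // dispatch on the remembered slot width
  visit_slot((FakeNarrowOop*)b);
  return 0;
}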
@@ -114,31 +114,41 @@
   // oop*). This is because of the difference in types between what
   // the task queue holds (oop*) and oops to partially-scanned arrays
   // (oop). We do all the necessary casting in the mask / unmask
   // methods to avoid sprinkling the rest of the code with more casts.
 
-  bool is_oop_masked(oop* p) {
-    return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK;
+  // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
+  // future masks) can't conflict with COMPRESSED_OOP_MASK
+#define PS_CHUNKED_ARRAY_OOP_MASK 0x2
+
+  bool is_oop_masked(StarTask p) {
+    // If something is marked chunked it's always treated like wide oop*
+    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
+                                  PS_CHUNKED_ARRAY_OOP_MASK;
   }
 
   oop* mask_chunked_array_oop(oop obj) {
     assert(!is_oop_masked((oop*) obj), "invariant");
-    oop* ret = (oop*) ((intptr_t) obj | PS_CHUNKED_ARRAY_OOP_MASK);
+    oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
     assert(is_oop_masked(ret), "invariant");
     return ret;
   }
 
-  oop unmask_chunked_array_oop(oop* p) {
+  oop unmask_chunked_array_oop(StarTask p) {
     assert(is_oop_masked(p), "invariant");
-    oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK);
+    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
+    oop *chunk = (oop*)p;  // cast p to oop (uses conversion operator)
+    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
     assert(!is_oop_masked((oop*) ret), "invariant");
     return ret;
   }
 
+  template <class T> void process_array_chunk_work(oop obj,
+                                                   int start, int end);
   void process_array_chunk(oop old);
 
-  void push_depth(oop* p) {
+  template <class T> void push_depth(T* p) {
     assert(depth_first(), "pre-condition");
 
 #if PS_PM_STATS
     ++_total_pushes;
 #endif // PS_PM_STATS
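
Moving PS_CHUNKED_ARRAY_OOP_MASK from 1 to 0x2 keeps the chunked-array tag out of the bit the task queue now uses for compressed (narrow) entries, as the new comment about COMPRESSED_OOP_MASK says. Both tags depend on queue entries addressing locations aligned to at least 4 bytes, which leaves the two low pointer bits free. Below is a minimal standalone sketch of the mask / test / unmask round trip, assuming the narrow tag occupies bit 0; the DEMO_* names and values are illustrative, not the HotSpot definitions.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative values only: DEMO_COMPRESSED_OOP_MASK stands in for the
// narrow-entry tag assumed to occupy bit 0, and DEMO_CHUNKED_ARRAY_MASK
// mirrors the new PS_CHUNKED_ARRAY_OOP_MASK value of 0x2.
#define DEMO_COMPRESSED_OOP_MASK 0x1
#define DEMO_CHUNKED_ARRAY_MASK  0x2

static bool is_chunked(void* p) {
  return ((uintptr_t)p & DEMO_CHUNKED_ARRAY_MASK) != 0;
}

static void* mask_chunked(void* obj) {
  assert(!is_chunked(obj));
  return (void*)((uintptr_t)obj | DEMO_CHUNKED_ARRAY_MASK);
}

static void* unmask_chunked(void* p) {
  assert(is_chunked(p));
  return (void*)((uintptr_t)p & ~(uintptr_t)DEMO_CHUNKED_ARRAY_MASK);
}

int main() {
  static int array_image[16];   // at least 4-byte aligned: bits 0-1 are free
  void* raw    = (void*)array_image;
  void* tagged = mask_chunked(raw);

  // The two tags live in different bits, so testing one can never misread
  // an entry carrying the other.
  assert(is_chunked(tagged));
  assert(((uintptr_t)tagged & DEMO_COMPRESSED_OOP_MASK) == 0);
  assert(unmask_chunked(tagged) == raw);

  printf("raw=%p tagged=%p untagged=%p\n", raw, tagged, unmask_chunked(tagged));
  return 0;
}

The second assert in main() is exactly the conflict the new comment warns about: an entry tagged as a chunked array must never also look like a narrow entry.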
@@ -173,11 +183,11 @@
 #endif // PS_PM_STATS
     }
   }
 
  protected:
   static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
   static OopTaskQueueSet* stack_array_breadth() { return _stack_array_breadth; }
 
  public:
   // Static
   static void initialize();
@@ -225,10 +235,11 @@
       drain_stacks_depth(totally_drain);
     } else {
       drain_stacks_breadth(totally_drain);
     }
   }
+ public:
   void drain_stacks_cond_depth() {
     if (claimed_stack_depth()->size() > _target_stack_size) {
       drain_stacks_depth(false);
     }
   }
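
The newly public drain_stacks_cond_depth() expresses a simple policy: let a worker's claimed stack grow up to _target_stack_size, and only then stop to drain, with drain_stacks_depth(false) meaning a partial rather than total drain. A tiny sketch of that threshold policy follows, under invented names (DemoWorker, push_and_maybe_drain); it makes no claim about the real drain loop, which also services the overflow stack and participates in stealing.

#include <cstddef>
#include <cstdio>
#include <vector>

struct DemoWorker {
  std::vector<int> local_queue;     // stands in for _claimed_stack_depth
  size_t target_size;               // stands in for _target_stack_size

  // totally_drain == false drains only down to the target size, in the same
  // spirit as drain_stacks_depth(false) above.
  void drain(bool totally_drain) {
    size_t stop_at = totally_drain ? 0 : target_size;
    while (local_queue.size() > stop_at) {
      int task = local_queue.back();
      local_queue.pop_back();
      printf("processed task %d\n", task);
    }
  }

  // Mirrors drain_stacks_cond_depth(): only pause to drain once the local
  // queue has grown past the target.
  void push_and_maybe_drain(int task) {
    local_queue.push_back(task);
    if (local_queue.size() > target_size) {
      drain(false);
    }
  }
};

int main() {
  DemoWorker w;
  w.target_size = 4;
  for (int i = 0; i < 10; i++) w.push_and_maybe_drain(i);
  w.drain(true);                    // end of phase: totally drain
  return 0;
}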
@@ -254,19 +265,15 @@
   }
   bool depth_first() {
     return _depth_first;
   }
 
-  inline void process_popped_location_depth(oop* p);
+  inline void process_popped_location_depth(StarTask p);
 
   inline void flush_prefetch_queue();
-
-  inline void claim_or_forward_depth(oop* p);
-  inline void claim_or_forward_internal_depth(oop* p);
-
-  inline void claim_or_forward_breadth(oop* p);
-  inline void claim_or_forward_internal_breadth(oop* p);
+  template <class T> inline void claim_or_forward_depth(T* p);
+  template <class T> inline void claim_or_forward_breadth(T* p);
 
 #if PS_PM_STATS
   void increment_steals(oop* p = NULL) {
     _total_steals += 1;
     if (p != NULL && is_oop_masked(p)) {
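
The PS_PM_STATS counter above separates steals of masked entries (partially-scanned arrays) from steals of ordinary reference locations, which is the same dispatch process_popped_location_depth(StarTask p) has to perform. A standalone sketch of that dispatch, reusing the DEMO_CHUNKED_ARRAY_MASK convention from the earlier sketch; DemoOop, copy_and_forward and handle_array_chunk are invented stand-ins, not PSPromotionManager methods.

#include <cstdint>
#include <cstdio>

#define DEMO_CHUNKED_ARRAY_MASK 0x2  // mirrors PS_CHUNKED_ARRAY_OOP_MASK

typedef uintptr_t DemoOop;           // invented stand-in for an object reference

// Invented stand-ins for the two kinds of work a popped entry can represent.
static void copy_and_forward(DemoOop* slot) {
  printf("scanning reference slot %p\n", (void*)slot);
}
static void handle_array_chunk(DemoOop array_image) {
  printf("scanning next chunk of array image 0x%lx\n", (unsigned long)array_image);
}

// Dispatch in the spirit of process_popped_location_depth(): a masked entry
// is the from-space image of a partially-scanned array, anything else is an
// ordinary reference location.
static void process_popped(DemoOop* p) {
  if (((uintptr_t)p & DEMO_CHUNKED_ARRAY_MASK) != 0) {
    handle_array_chunk((DemoOop)((uintptr_t)p & ~(uintptr_t)DEMO_CHUNKED_ARRAY_MASK));
  } else {
    copy_and_forward(p);
  }
}

int main() {
  static DemoOop plain_slot = 0;     // an ordinary reference location
  static DemoOop fake_array[4];      // pretend from-space array image
  process_popped(&plain_slot);
  process_popped((DemoOop*)((uintptr_t)fake_array | DEMO_CHUNKED_ARRAY_MASK));
  return 0;
}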