comparison src/share/vm/oops/markOop.hpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children d8b3ef7ee3e5
1 /*
2 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 // The markOop describes the header of an object.
26 //
27 // Note that the mark is not a real oop but just a word.
28 // It is placed in the oop hierarchy for historical reasons.
29 //
30 // Bit-format of an object header (most significant first):
31 //
32 //
33 // unused:0/25 hash:25/31 age:4 biased_lock:1 lock:2 = 32/64 bits
34 //
35 // - hash contains the identity hash value: largest value is
36 // 31 bits, see os::random(). Also, 64-bit vm's require
37 // a hash value no bigger than 32 bits because they will not
38 // properly generate a mask larger than that: see library_call.cpp
39 // and c1_CodePatterns_sparc.cpp.
40 //
41 // - the biased lock pattern is used to bias a lock toward a given
42 // thread. When this pattern is set in the low three bits, the lock
43 // is either biased toward a given thread or "anonymously" biased,
44 // indicating that it is possible for it to be biased. When the
45 // lock is biased toward a given thread, locking and unlocking can
46 // be performed by that thread without using atomic operations.
47 // When a lock's bias is revoked, it reverts back to the normal
48 // locking scheme described below.
49 //
50 // Note that we are overloading the meaning of the "unlocked" state
51 // of the header. Because we steal a bit from the age we can
52 // guarantee that the bias pattern will never be seen for a truly
53 // unlocked object.
54 //
55 // Note also that the biased state contains the age bits normally
56 // contained in the object header. Large increases in scavenge
57 // times were seen when these bits were absent and an arbitrary age
58 // assigned to all biased objects, because they tended to consume a
59 // significant fraction of the eden semispaces and were not
60 // promoted promptly, causing an increase in the amount of copying
61 // performed. The runtime system aligns all JavaThread* pointers to
62 // a very large value (currently 128 bytes) to make room for the
63 // age bits when biased locking is enabled.
64 //
65 // [JavaThread* | epoch | age | 1 | 01] lock is biased toward given thread
66 // [0 | epoch | age | 1 | 01] lock is anonymously biased
67 //
68 // - the two lock bits are used to describe three states: locked, unlocked, and monitor.
69 //
70 // [ptr | 00] locked ptr points to real header on stack
71 // [header | 0 | 01] unlocked regular object header
72 // [ptr | 10] monitor inflated lock (header is swapped out)
73 // [ptr | 11] marked used by markSweep to mark an object
74 // not valid at any other time
75 //
76 // We assume that stack/thread pointers have the lowest two bits cleared.
77
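// Editor's note -- illustrative sketch, not part of the original header file.
// Putting concrete numbers on the layout above, using the shift constants
// defined in markOopDesc below (lock bits at 0-1, bias bit at 2, age at 3-6,
// hash/epoch starting at bit 7): an unlocked, unbiased object with identity
// hash 0x12345 and age 3 would carry
//
//   mark = (0x12345 << 7) | (3 << 3) | 0x1;   // == 0x91A299, low bits ...001
//
// whereas a biased mark stores the (heavily aligned) JavaThread* and the epoch
// in the bit positions otherwise used by the hash, which is why an object
// cannot be both biased and carry an identity hash in its header.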
78 class BasicLock;
79 class ObjectMonitor;
80 class JavaThread;
81
82 class markOopDesc: public oopDesc {
83 private:
84 // Conversion
85 uintptr_t value() const { return (uintptr_t) this; }
86
87 public:
88 // Constants
89 enum { age_bits = 4,
90 lock_bits = 2,
91 biased_lock_bits = 1,
92 max_hash_bits = BitsPerOop - age_bits - lock_bits - biased_lock_bits,
93 hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits,
94 epoch_bits = 2
95 };
96
97 // The biased locking code currently requires that the age bits be
98 // contiguous to the lock bits. Class data sharing would prefer the
99 // hash bits to be lower down to provide more random hash codes for
100 // shared read-only symbolOop objects, because these objects' mark
101 // words are set to their own address with marked_value in the lock
102 // bit, and using lower bits would make their identity hash values
103 // more random. However, the performance decision was made in favor
104 // of the biased locking code.
105
106 enum { lock_shift = 0,
107 biased_lock_shift = lock_bits,
108 age_shift = lock_bits + biased_lock_bits,
109 hash_shift = lock_bits + biased_lock_bits + age_bits,
110 epoch_shift = hash_shift
111 };
112
113 enum { lock_mask = right_n_bits(lock_bits),
114 lock_mask_in_place = lock_mask << lock_shift,
115 biased_lock_mask = right_n_bits(lock_bits + biased_lock_bits),
116 biased_lock_mask_in_place= biased_lock_mask << lock_shift,
117 biased_lock_bit_in_place = 1 << biased_lock_shift,
118 age_mask = right_n_bits(age_bits),
119 age_mask_in_place = age_mask << age_shift,
120 epoch_mask = right_n_bits(epoch_bits),
121 epoch_mask_in_place = epoch_mask << epoch_shift
122 #ifndef _WIN64
123 ,hash_mask = right_n_bits(hash_bits),
124 hash_mask_in_place = (address_word)hash_mask << hash_shift
125 #endif
126 };
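// Editor's note -- worked values, illustrative only, derived from the
// constants above: lock_shift = 0, biased_lock_shift = 2, age_shift = 3,
// hash_shift = epoch_shift = 7; lock_mask = 0x3, biased_lock_mask = 0x7,
// biased_lock_bit_in_place = 0x4, age_mask_in_place = 0x78,
// epoch_mask_in_place = 0x180. On a 32-bit VM hash_bits is 25, giving
// hash_mask_in_place = 0xFFFFFF80; on a 64-bit VM hash_bits is capped at 31,
// giving hash_mask_in_place = 0x3FFFFFFF80. Note that epoch_shift equals
// hash_shift: a biased mark reuses the hash field's bit positions for the
// epoch and thread pointer.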
127
128 // Alignment of JavaThread pointers encoded in object header required by biased locking
129 enum { biased_lock_alignment = 2 << (epoch_shift + epoch_bits)
130 };
131
132 #ifdef _WIN64
133 // These values are too big for Win64
134 const static uintptr_t hash_mask = right_n_bits(hash_bits);
135 const static uintptr_t hash_mask_in_place =
136 (address_word)hash_mask << hash_shift;
137 #endif
138
139 enum { locked_value = 0,
140 unlocked_value = 1,
141 monitor_value = 2,
142 marked_value = 3,
143 biased_lock_pattern = 5
144 };
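// Editor's note -- the same values written out as low-order bit patterns
// (illustrative only):
//
//   ..00   locked_value          stack-locked; rest of the word is a BasicLock*
//   .001   unlocked_value        neutral header, bias bit clear
//   ..10   monitor_value         inflated; rest of the word is an ObjectMonitor*
//   ..11   marked_value          GC mark
//   .101   biased_lock_pattern   unlocked_value with the bias bit set
//
// biased_lock_pattern (5) therefore differs from a plain unlocked header only
// in bit 2, the bit "stolen" from the age field as described at the top of
// this file, so has_bias_pattern() can test all three low bits at once.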
145
146 enum { no_hash = 0 }; // no hash value assigned
147
148 enum { no_hash_in_place = (address_word)no_hash << hash_shift,
149 no_lock_in_place = unlocked_value
150 };
151
152 enum { max_age = age_mask };
153
154 enum { max_bias_epoch = epoch_mask };
155
156 // Biased Locking accessors.
157 // These must be checked by all code which calls into the
158 // ObjectSynchronizer and other code. The biasing is not understood
159 // by the lower-level CAS-based locking code, although the runtime
160 // fixes up biased locks to be compatible with it when a bias is
161 // revoked.
162 bool has_bias_pattern() const {
163 return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
164 }
165 JavaThread* biased_locker() const {
166 assert(has_bias_pattern(), "should not call this otherwise");
167 return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
168 }
169 // Indicates that the mark has the bias bit set but that it has not
170 // yet been biased toward a particular thread
171 bool is_biased_anonymously() const {
172 return (has_bias_pattern() && (biased_locker() == NULL));
173 }
174 // Indicates epoch in which this bias was acquired. If the epoch
175 // changes due to too many bias revocations occurring, the biases
176 // from the previous epochs are all considered invalid.
177 int bias_epoch() const {
178 assert(has_bias_pattern(), "should not call this otherwise");
179 return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
180 }
181 markOop set_bias_epoch(int epoch) {
182 assert(has_bias_pattern(), "should not call this otherwise");
183 assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
184 return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
185 }
186 markOop incr_bias_epoch() {
187 return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
188 }
189 // Prototype mark for initialization
190 static markOop biased_locking_prototype() {
191 return markOop( biased_lock_pattern );
192 }
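// Editor's note -- hedged usage sketch, not from the original source; 'obj'
// is a hypothetical oop. A reader of these accessors might picture their use
// roughly as:
//
//   markOop mark = obj->mark();
//   if (mark->has_bias_pattern()) {
//     JavaThread* owner = mark->biased_locker();   // NULL if anonymously biased
//     int epoch = mark->bias_epoch();
//     // a bias whose epoch no longer matches the epoch currently considered
//     // valid for the object's class is treated as revocable/invalid
//   }
//
// The real checks live in the biased locking and synchronizer runtime code;
// this snippet only illustrates how the accessors compose.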
193
194 // lock accessors (note that these assume lock_shift == 0)
195 bool is_locked() const {
196 return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
197 }
198 bool is_unlocked() const {
199 return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
200 }
201 bool is_marked() const {
202 return (mask_bits(value(), lock_mask_in_place) == marked_value);
203 }
204 bool is_neutral() const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }
205
206 // Special temporary state of the markOop while being inflated.
207 // Code that looks at the mark outside a lock needs to take this into account.
208 bool is_being_inflated() const { return (value() == 0); }
209
210 // Distinguished markword value - used when inflating over
211 // an existing stacklock. 0 indicates the markword is "BUSY".
212 // Lockword mutators that use a LD...CAS idiom should always
213 // check for and avoid overwriting a 0 value installed by some
214 // other thread. (They should spin or block instead. The 0 value
215 // is transient and *should* be short-lived).
216 static markOop INFLATING() { return (markOop) 0; } // inflate-in-progress
217
218 // Should this header be preserved during GC?
219 bool must_be_preserved(oop obj_containing_mark) const {
220 if (!UseBiasedLocking)
221 return (!is_unlocked() || !has_no_hash());
222 return must_be_preserved_with_bias(obj_containing_mark);
223 }
224 inline bool must_be_preserved_with_bias(oop obj_containing_mark) const;
225
226 // Should this header (including its age bits) be preserved in the
227 // case of a promotion failure during scavenge?
228 // Note that we special case this situation. We want to avoid
229 // calling BiasedLocking::preserve_marks()/restore_marks() (which
230 // decrease the number of mark words that need to be preserved
231 // during GC) during each scavenge. During scavenges in which there
232 // is no promotion failure, we actually don't need to call the above
233 // routines at all, since we don't mutate and re-initialize the
234 // marks of promoted objects using init_mark(). However, during
235 // scavenges which result in promotion failure, we do re-initialize
236 // the mark words of objects, meaning that we should have called
237 // these mark word preservation routines. Currently there's no good
238 // place in which to call them in any of the scavengers (although
239 // guarded by appropriate locks we could make one), but the
240 // observation is that promotion failures are quite rare and
241 // reducing the number of mark words preserved during them isn't a
242 // high priority.
243 bool must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
244 if (!UseBiasedLocking)
245 return (this != prototype());
246 return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
247 }
248 inline bool must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const;
249
250 // Should this header be preserved during a scavenge where CMS is
251 // the old generation?
252 // (This is basically the same body as must_be_preserved_for_promotion_failure(),
253 // but takes the klassOop as argument instead)
254 bool must_be_preserved_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const {
255 if (!UseBiasedLocking)
256 return (this != prototype());
257 return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
258 }
259 inline bool must_be_preserved_with_bias_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const;
260
261 // WARNING: The following routines are used EXCLUSIVELY by
262 // synchronization functions. They are not really gc safe.
263 // They must be updated if the markOop layout gets changed.
264 markOop set_unlocked() const {
265 return markOop(value() | unlocked_value);
266 }
267 bool has_locker() const {
268 return ((value() & lock_mask_in_place) == locked_value);
269 }
270 BasicLock* locker() const {
271 assert(has_locker(), "check");
272 return (BasicLock*) value();
273 }
274 bool has_monitor() const {
275 return ((value() & monitor_value) != 0);
276 }
277 ObjectMonitor* monitor() const {
278 assert(has_monitor(), "check");
279 // Use xor instead of &~ to provide one extra tag-bit check.
280 return (ObjectMonitor*) (value() ^ monitor_value);
281 }
282 bool has_displaced_mark_helper() const {
283 return ((value() & unlocked_value) == 0);
284 }
285 markOop displaced_mark_helper() const {
286 assert(has_displaced_mark_helper(), "check");
287 intptr_t ptr = (value() & ~monitor_value);
288 return *(markOop*)ptr;
289 }
290 void set_displaced_mark_helper(markOop m) const {
291 assert(has_displaced_mark_helper(), "check");
292 intptr_t ptr = (value() & ~monitor_value);
293 *(markOop*)ptr = m;
294 }
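// Editor's note -- illustrative reading of the helpers above, not original
// text. Both a stack lock (low bits 00, the mark is effectively a BasicLock*)
// and an inflated lock (monitor_value tag, the mark is an ObjectMonitor* | 2)
// keep the object's original ("displaced") header in the first word of the
// structure the mark points at, which is exactly what displaced_mark_helper()
// dereferences after stripping the monitor tag:
//
//   markOop m = obj->mark();                       // 'obj' is hypothetical
//   if (m->has_displaced_mark_helper()) {          // true when bit 0 is clear
//     markOop original_header = m->displaced_mark_helper();
//   }
//
// has_displaced_mark_helper() covers both cases because locked_value (00) and
// monitor_value (10) both have the unlocked bit clear, while unlocked, biased,
// and marked headers all have it set.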
295 markOop copy_set_hash(intptr_t hash) const {
296 intptr_t tmp = value() & (~hash_mask_in_place);
297 tmp |= ((hash & hash_mask) << hash_shift);
298 return (markOop)tmp;
299 }
300 // This value is only stored into a BasicLock as the
301 // indicator that the lock is using a heavyweight monitor
302 static markOop unused_mark() {
303 return (markOop) marked_value;
304 }
305 // the following functions create the markOop to be
306 // stored into the object header; they encode lock, monitor, or bias info
307 static markOop encode(BasicLock* lock) {
308 return (markOop) lock;
309 }
310 static markOop encode(ObjectMonitor* monitor) {
311 intptr_t tmp = (intptr_t) monitor;
312 return (markOop) (tmp | monitor_value);
313 }
314 static markOop encode(JavaThread* thread, int age, int bias_epoch) {
315 intptr_t tmp = (intptr_t) thread;
316 assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
317 assert(age <= max_age, "age too large");
318 assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
319 return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
320 }
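// Editor's note -- hedged sketch of how these encoders pair with the layout
// above; 'basic_lock', 'object_monitor', 'thread', 'age', and
// 'epoch_for_class' are hypothetical names and the surrounding bookkeeping is
// simplified:
//
//   markOop stack_locked = markOopDesc::encode(basic_lock);       // low bits 00
//   markOop inflated     = markOopDesc::encode(object_monitor);   // low bits 10
//   markOop biased       = markOopDesc::encode(thread, age, epoch_for_class);
//
// encode(BasicLock*) relies on lock records being word-aligned stack slots
// (see the note near the top that stack/thread pointers have the lowest two
// bits cleared); encode(JavaThread*, ...) relies on JavaThread objects being
// aligned to biased_lock_alignment, which is what the "misaligned JavaThread
// pointer" assert verifies.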
321
322 // used to encode pointers during GC
323 markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }
324
325 // age operations
326 markOop set_marked() { return markOop((value() & ~lock_mask_in_place) | marked_value); }
327
328 int age() const { return mask_bits(value() >> age_shift, age_mask); }
329 markOop set_age(int v) const {
330 assert((v & ~age_mask) == 0, "shouldn't overflow age field");
331 return markOop((value() & ~age_mask_in_place) | (((intptr_t)v & age_mask) << age_shift));
332 }
333 markOop incr_age() const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
334
335 // hash operations
336 intptr_t hash() const {
337 return mask_bits(value() >> hash_shift, hash_mask);
338 }
339
340 bool has_no_hash() const {
341 return hash() == no_hash;
342 }
343
344 // Prototype mark for initialization
345 static markOop prototype() {
346 return markOop( no_hash_in_place | no_lock_in_place );
347 }
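// Editor's note -- worked value, illustrative only: no_hash_in_place is 0 and
// no_lock_in_place is unlocked_value (1), so prototype() is simply
// markOop(0x1): unlocked, unbiased, age 0, no identity hash. Compare
// biased_locking_prototype() above, which is markOop(0x5), the anonymously
// biased form of the same neutral header.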
348
349 // Helper function for restoration of unmarked mark oops during GC
350 static inline markOop prototype_for_object(oop obj);
351
352 // Debugging
353 void print_on(outputStream* st) const;
354
355 // Prepare address of oop for placement into mark
356 inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
357
358 // Recover address of oop from encoded form used in mark
359 inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }
360 };