comparison src/share/vm/oops/oop.inline.hpp @ 113:ba764ed4b6f2

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers.
Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold

author   coleenp
date     Sun, 13 Apr 2008 17:43:42 -0400
parents  a61af66fc99e
children 435e64505015
comparing 110:a49a647afe9a with 113:ba764ed4b6f2
  */
 
 // Implementation of all inlined member functions defined in oop.hpp
 // We need a separate file to avoid circular references
 
-
 inline void oopDesc::release_set_mark(markOop m) {
   OrderAccess::release_store_ptr(&_mark, m);
 }
 
 inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
   return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
+}
+
+inline klassOop oopDesc::klass() const {
+  if (UseCompressedOops) {
+    return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
+    // can be NULL in CMS, but isn't supported on CMS yet.
+  } else {
+    return _metadata._klass;
+  }
+}
+
+inline int oopDesc::klass_gap_offset_in_bytes() {
+  assert(UseCompressedOops, "only applicable to compressed headers");
+  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
+}
+
+inline oop* oopDesc::klass_addr() {
+  // Only used internally and with CMS and will not work with
+  // UseCompressedOops
+  assert(!UseCompressedOops, "only supported with uncompressed oops");
+  return (oop*) &_metadata._klass;
+}
+
+inline narrowOop* oopDesc::compressed_klass_addr() {
+  assert(UseCompressedOops, "only called by compressed oops");
+  return (narrowOop*) &_metadata._compressed_klass;
 }
 
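The accessors above assume the object header now carries a _metadata union, declared in oop.hpp by this same changeset. A minimal compilable sketch of that layout, with HotSpot types reduced to primitives (the typedefs are illustrative assumptions, not the real declarations):

    #include <stdint.h>

    typedef uintptr_t markOop;    // assumption: pointer-sized mark word
    typedef void*     klassOop;   // assumption: full-width klass pointer
    typedef uint32_t  narrowOop;  // 32-bit offset from the heap base

    struct oopDesc_sketch {
      volatile markOop _mark;
      union {
        klassOop  _klass;             // used when !UseCompressedOops
        narrowOop _compressed_klass;  // used when UseCompressedOops
      } _metadata;
    };

When the klass is compressed, only the first 4 bytes of the union are significant; the remaining 4 bytes are the gap that klass_gap_offset_in_bytes() locates, and set_klass() below zeroes the whole pointer-sized union before storing so that gap stays clean.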
 inline void oopDesc::set_klass(klassOop k) {
   // since klasses are promoted no store check is needed
   assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
   assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
-  oop_store_without_check((oop*) &_klass, (oop) k);
+  if (UseCompressedOops) {
+    // zero the gap when the klass is set, by zeroing the pointer sized
+    // part of the union.
+    _metadata._klass = NULL;
+    oop_store_without_check(compressed_klass_addr(), (oop)k);
+  } else {
+    oop_store_without_check(klass_addr(), (oop) k);
+  }
 }
 
 inline void oopDesc::set_klass_to_list_ptr(oop k) {
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
-  _klass = (klassOop)k;
+  if (UseCompressedOops) {
+    _metadata._compressed_klass = encode_heap_oop_not_null(k);
+  } else {
+    _metadata._klass = (klassOop)k;
+  }
 }
 
 inline void oopDesc::init_mark()         { set_mark(markOopDesc::prototype_for_object(this)); }
 inline Klass* oopDesc::blueprint() const { return klass()->klass_part(); }
 
...
 inline bool oopDesc::is_constantPoolCache() const { return blueprint()->oop_is_constantPoolCache(); }
 inline bool oopDesc::is_compiledICHolder() const  { return blueprint()->oop_is_compiledICHolder(); }
 
 inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
 
-inline oop* oopDesc::obj_field_addr(int offset) const { return (oop*) field_base(offset); }
+template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
 inline jbyte* oopDesc::byte_field_addr(int offset) const      { return (jbyte*)    field_base(offset); }
 inline jchar* oopDesc::char_field_addr(int offset) const      { return (jchar*)    field_base(offset); }
 inline jboolean* oopDesc::bool_field_addr(int offset) const   { return (jboolean*) field_base(offset); }
 inline jint* oopDesc::int_field_addr(int offset) const        { return (jint*)     field_base(offset); }
 inline jshort* oopDesc::short_field_addr(int offset) const    { return (jshort*)   field_base(offset); }
 inline jlong* oopDesc::long_field_addr(int offset) const      { return (jlong*)    field_base(offset); }
 inline jfloat* oopDesc::float_field_addr(int offset) const    { return (jfloat*)   field_base(offset); }
 inline jdouble* oopDesc::double_field_addr(int offset) const  { return (jdouble*)  field_base(offset); }
+inline address* oopDesc::address_field_addr(int offset) const { return (address*)  field_base(offset); }
 
-inline oop oopDesc::obj_field(int offset) const { return *obj_field_addr(offset); }
-inline void oopDesc::obj_field_put(int offset, oop value) { oop_store(obj_field_addr(offset), value); }
+
+// Functions for getting and setting oops within instance objects.
+// If the oops are compressed, the type passed to these overloaded functions
+// is narrowOop.  All functions are overloaded so they can be called by
+// template functions without conditionals (the compiler instantiates via
+// the right type and inlines the appropriate code).
+
+inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
+inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
+
+// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
+// offset from the heap base.  Saving the check for null can save instructions
+// in inner GC loops so these are separated.
+
+inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
+  assert(!is_null(v), "oop value can never be zero");
+  address heap_base = Universe::heap_base();
+  uint64_t result = (uint64_t)(pointer_delta((void*)v, (void*)heap_base, 1) >> LogMinObjAlignmentInBytes);
+  assert((result & 0xffffffff00000000L) == 0, "narrow oop overflow");
+  return (narrowOop)result;
+}
+
+inline narrowOop oopDesc::encode_heap_oop(oop v) {
+  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
+}
+
+inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
+  assert(!is_null(v), "narrow oop value can never be zero");
+  address heap_base = Universe::heap_base();
+  return (oop)(void*)((uintptr_t)heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes));
+}
+
+inline oop oopDesc::decode_heap_oop(narrowOop v) {
+  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
+}
+
+inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
+inline oop oopDesc::decode_heap_oop(oop v)          { return v; }
+
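The arithmetic above is what yields the 32gb bound in the synopsis: a 32-bit narrowOop scaled by the minimum object alignment covers 2^32 * 8 bytes when objects are 8-byte aligned. A standalone round-trip sketch (the base address and the shift of 3 are assumptions standing in for Universe::heap_base() and LogMinObjAlignmentInBytes):

    #include <assert.h>
    #include <stdint.h>

    int main() {
      uint64_t heap_base = 0x100000000ULL;           // hypothetical heap base
      uint64_t wide      = heap_base + 0x7fffffff8;  // last encodable address
      uint32_t narrow    = (uint32_t)((wide - heap_base) >> 3);  // encode
      uint64_t decoded   = heap_base + ((uint64_t)narrow << 3);  // decode
      assert(narrow == 0xffffffffU && decoded == wide);          // round trip
      return 0;
    }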
+// Load an oop out of the Java heap as is without decoding.
+// Called by GC to check for null before decoding.
+inline oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
+inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }
+
+// Load and decode an oop out of the Java heap into a wide oop.
+inline oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
+inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
+  return decode_heap_oop_not_null(*p);
+}
+
+// Load and decode an oop out of the heap accepting null
+inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
+inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
+  return decode_heap_oop(*p);
+}
+
+// Store already encoded heap oop into the heap.
+inline void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
+inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
+
+// Encode and store a heap oop.
+inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
+  *p = encode_heap_oop_not_null(v);
+}
+inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
+
+// Encode and store a heap oop allowing for null.
+inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
+  *p = encode_heap_oop(v);
+}
+inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }
+
+// Store heap oop as is for volatile fields.
+inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
+  OrderAccess::release_store_ptr(p, v);
+}
+inline void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) {
+  OrderAccess::release_store(p, v);
+}
+
+inline void oopDesc::release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v) {
+  // heap oop is not pointer sized.
+  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
+}
+inline void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) {
+  OrderAccess::release_store_ptr(p, v);
+}
+
+inline void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
+  OrderAccess::release_store_ptr(p, v);
+}
+inline void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) {
+  OrderAccess::release_store(p, encode_heap_oop(v));
+}
+
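These overload pairs exist so GC loops can be written once, templated on the slot type, as the block comment introducing them says. A hypothetical visitor showing the intended pattern (the function name is illustrative, not from this changeset):

    // Instantiated with T = oop or T = narrowOop; the UseCompressedOops
    // decision is made once by the caller that picks T, not per slot.
    template <class T> inline void visit_oop_slot(T* p) {
      T heap_oop = oopDesc::load_heap_oop(p);   // raw load, no decode yet
      if (!oopDesc::is_null(heap_oop)) {        // null test on the raw value
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        // ... mark/copy obj, then oopDesc::encode_store_heap_oop(p, obj) ...
      }
    }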
+// These functions are only used to exchange oop fields in instances,
+// not headers.
+inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
+  if (UseCompressedOops) {
+    // encode exchange value from oop to T
+    narrowOop val = encode_heap_oop(exchange_value);
+    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
+    // decode old from T to oop
+    return decode_heap_oop(old);
+  } else {
+    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
+  }
+}
+
+inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
+                                                volatile HeapWord *dest,
+                                                oop compare_value) {
+  if (UseCompressedOops) {
+    // encode exchange and compare value from oop to T
+    narrowOop val = encode_heap_oop(exchange_value);
+    narrowOop cmp = encode_heap_oop(compare_value);
+
+    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
+    // decode old from T to oop
+    return decode_heap_oop(old);
+  } else {
+    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
+  }
+}
+
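A hedged usage sketch for the compare-exchange above: dest is typed HeapWord* precisely because the caller may not know statically whether the slot holds an oop or a narrowOop (names here are illustrative):

    // Returns the witnessed old value; the CAS succeeded iff it equals cmp.
    // encode_heap_oop (the null-accepting variant) is used internally, so
    // both NULL exchange and NULL compare values are legal.
    inline oop try_install(oop val, volatile HeapWord* slot, oop cmp) {
      return oopDesc::atomic_compare_exchange_oop(val, slot, cmp);
    }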
+// In order to put or get a field out of an instance, must first check
+// if the field has been compressed and uncompress it.
+inline oop oopDesc::obj_field(int offset) const {
+  return UseCompressedOops ?
+    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
+    load_decode_heap_oop(obj_field_addr<oop>(offset));
+}
+inline void oopDesc::obj_field_put(int offset, oop value) {
+  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
+                      oop_store(obj_field_addr<oop>(offset), value);
+}
+inline void oopDesc::obj_field_raw_put(int offset, oop value) {
+  UseCompressedOops ?
+    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
+    encode_store_heap_oop(obj_field_addr<oop>(offset), value);
+}
 
 inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset); }
 inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }
 
 inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
...
 inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents; }
 
 inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset); }
 inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }
 
-inline oop oopDesc::obj_field_acquire(int offset) const { return (oop)OrderAccess::load_ptr_acquire(obj_field_addr(offset)); }
-inline void oopDesc::release_obj_field_put(int offset, oop value) { oop_store((volatile oop*)obj_field_addr(offset), value); }
+inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset); }
+inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }
+
+inline oop oopDesc::obj_field_acquire(int offset) const {
+  return UseCompressedOops ?
+             decode_heap_oop((narrowOop)
+               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
+           : decode_heap_oop((oop)
+               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
+}
+inline void oopDesc::release_obj_field_put(int offset, oop value) {
+  UseCompressedOops ?
+    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
+    oop_store((volatile oop*)      obj_field_addr<oop>(offset),      value);
+}
 
 inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset)); }
 inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }
 
 inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset)); }
...
 inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset)); }
 inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents); }
 
 inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset)); }
 inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }
-
 
 inline int oopDesc::size_given_klass(Klass* klass) {
   int lh = klass->layout_helper();
   int s  = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
 
...
   // If and when UseParallelGC uses the same obj array oop stealing/chunking
   // technique, or when G1 is integrated (and currently uses this array chunking
   // technique) we will need to suitably modify the assertion.
   assert((s == klass->oop_size(this)) ||
          (((UseParNewGC || UseParallelGC) &&
            Universe::heap()->is_gc_active()) &&
           (is_typeArray() ||
            (is_objArray() && is_forwarded()))),
          "wrong array object size");
   } else {
     // Must be zero, so bite the bullet and take the virtual call.
...
 
 inline bool oopDesc::is_parsable() {
   return blueprint()->oop_is_parsable(this);
 }
 
-
-inline void update_barrier_set(oop *p, oop v) {
+inline void update_barrier_set(void* p, oop v) {
   assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
   oopDesc::bs()->write_ref_field(p, v);
 }
 
-
-inline void oop_store(oop* p, oop v) {
+template <class T> inline void oop_store(T* p, oop v) {
   if (always_do_update_barrier) {
-    oop_store((volatile oop*)p, v);
+    oop_store((volatile T*)p, v);
   } else {
-    *p = v;
+    oopDesc::encode_store_heap_oop(p, v);
     update_barrier_set(p, v);
   }
 }
 
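Note the ordering the template above preserves: the (possibly narrow) store via encode_store_heap_oop happens first, then update_barrier_set dirties the card; its parameter became void* since the slot may now be a narrowOop. For intuition, a sketch of the card-marking idea behind BarrierSet::write_ref_field (table size, shift, and base bias are illustrative assumptions, not HotSpot's actual code):

    #include <stdint.h>

    static unsigned char card_table[1 << 20];  // one byte per 512-byte card
    static uintptr_t     covered_base;         // start of the covered heap
    const  int           card_shift = 9;

    inline void sketch_write_ref_field(void* slot) {
      // Dirty the card covering 'slot' so the next scan revisits it;
      // only the slot's address matters, not whether it is 4 or 8 bytes.
      card_table[((uintptr_t)slot - covered_base) >> card_shift] = 0;
    }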
-inline void oop_store(volatile oop* p, oop v) {
+template <class T> inline void oop_store(volatile T* p, oop v) {
   // Used by release_obj_field_put, so use release_store_ptr.
-  OrderAccess::release_store_ptr(p, v);
-  update_barrier_set((oop *)p, v);
+  oopDesc::release_encode_store_heap_oop(p, v);
+  update_barrier_set((void*)p, v);
 }
 
-inline void oop_store_without_check(oop* p, oop v) {
-  // XXX YSR FIX ME!!!
-  if (always_do_update_barrier) {
-    oop_store(p, v);
-  } else {
-    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
-           "oop store without store check failed");
-    *p = v;
-  }
-}
-
-// When it absolutely has to get there.
-inline void oop_store_without_check(volatile oop* p, oop v) {
+template <class T> inline void oop_store_without_check(T* p, oop v) {
   // XXX YSR FIX ME!!!
   if (always_do_update_barrier) {
     oop_store(p, v);
   } else {
-    assert(!Universe::heap()->barrier_set()->
-           write_ref_needs_barrier((oop *)p, v),
+    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
            "oop store without store check failed");
-    OrderAccess::release_store_ptr(p, v);
+    oopDesc::encode_store_heap_oop(p, v);
   }
 }
 
+// When it absolutely has to get there.
+template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
+  // XXX YSR FIX ME!!!
+  if (always_do_update_barrier) {
+    oop_store(p, v);
+  } else {
+    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
+           "oop store without store check failed");
+    oopDesc::release_encode_store_heap_oop(p, v);
+  }
+}
+
+// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
+// (without having to remember the function name this calls).
+inline void oop_store_raw(HeapWord* addr, oop value) {
+  if (UseCompressedOops) {
+    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
+  } else {
+    oopDesc::encode_store_heap_oop((oop*)addr, value);
+  }
+}
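As its comment says, oop_store_raw hides the UseCompressedOops address-type choice from callers that only have a raw slot address. A hypothetical caller (no barrier is applied, so the pattern suits not-yet-published objects, e.g. during allocation or copying):

    // Store v into obj at a byte offset without knowing the slot type.
    inline void raw_field_store(oop obj, int offset, oop v) {
      oop_store_raw((HeapWord*)((char*)obj + offset), v);
    }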
 
 // Used only for markSweep, scavenging
 inline bool oopDesc::is_gc_marked() const {
   return mark()->is_marked();
 }
 
...
 
 // used only for asserts
 inline bool oopDesc::is_unlocked_oop() const {
   if (!Universe::heap()->is_in_reserved(this)) return false;
   return mark()->is_unlocked();
 }
-
-
 #endif // PRODUCT
 
 inline void oopDesc::follow_header() {
-  MarkSweep::mark_and_push((oop*)&_klass);
-}
-
-inline void oopDesc::follow_contents() {
+  if (UseCompressedOops) {
+    MarkSweep::mark_and_push(compressed_klass_addr());
+  } else {
+    MarkSweep::mark_and_push(klass_addr());
+  }
+}
+
+inline void oopDesc::follow_contents(void) {
   assert (is_gc_marked(), "should be marked");
   blueprint()->oop_follow_contents(this);
 }
 
 
...
 
 inline bool oopDesc::is_forwarded() const {
   // The extra heap check is needed since the obj might be locked, in which case the
   // mark would point to a stack location and have the sentinel bit cleared
   return mark()->is_marked();
 }
-
 
 // Used by scavengers
 inline void oopDesc::forward_to(oop p) {
   assert(Universe::heap()->is_in_reserved(p),
          "forwarding to something not in heap");
...
 }
 
 // Note that the forwardee is not the same thing as the displaced_mark.
 // The forwardee is used when copying during scavenge and mark-sweep.
 // It does need to clear the low two locking- and GC-related bits.
-inline oop oopDesc::forwardee() const { return (oop) mark()->decode_pointer(); }
-
+inline oop oopDesc::forwardee() const {
+  return (oop) mark()->decode_pointer();
+}
 
 inline bool oopDesc::has_displaced_mark() const {
   return mark()->has_displaced_mark_helper();
 }
 
...
   } else {
     return slow_identity_hash();
   }
 }
 
-
 inline void oopDesc::oop_iterate_header(OopClosure* blk) {
-  blk->do_oop((oop*)&_klass);
-}
-
+  if (UseCompressedOops) {
+    blk->do_oop(compressed_klass_addr());
+  } else {
+    blk->do_oop(klass_addr());
+  }
+}
 
 inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
-  if (mr.contains(&_klass)) blk->do_oop((oop*)&_klass);
-}
-
+  if (UseCompressedOops) {
+    if (mr.contains(compressed_klass_addr())) {
+      blk->do_oop(compressed_klass_addr());
+    }
+  } else {
+    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
+  }
+}
 
 inline int oopDesc::adjust_pointers() {
   debug_only(int check_size = size());
   int s = blueprint()->oop_adjust_pointers(this);
   assert(s == check_size, "should be the same");
   return s;
 }
 
 inline void oopDesc::adjust_header() {
-  MarkSweep::adjust_pointer((oop*)&_klass);
+  if (UseCompressedOops) {
+    MarkSweep::adjust_pointer(compressed_klass_addr());
+  } else {
+    MarkSweep::adjust_pointer(klass_addr());
+  }
 }
 
 #define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                      \
                                                                          \
 inline int oopDesc::oop_iterate(OopClosureType* blk) {                   \