comparison src/share/vm/oops/oop.inline.hpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents b0efc7ee3b31
children 46c017102631
comparing 6724:36d1d483d5d6 with 6725:da91efe96a93
  /*
-  * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
...
  #include "gc_implementation/shared/ageTable.hpp"
  #include "gc_implementation/shared/markSweep.inline.hpp"
  #include "gc_interface/collectedHeap.inline.hpp"
  #include "memory/barrierSet.inline.hpp"
  #include "memory/cardTableModRefBS.hpp"
- #include "memory/compactingPermGenGen.hpp"
  #include "memory/genCollectedHeap.hpp"
  #include "memory/generation.hpp"
- #include "memory/permGen.hpp"
  #include "memory/specialized_oop_closures.hpp"
  #include "oops/arrayKlass.hpp"
  #include "oops/arrayOop.hpp"
  #include "oops/klass.hpp"
- #include "oops/klassOop.hpp"
  #include "oops/markOop.inline.hpp"
  #include "oops/oop.hpp"
  #include "runtime/atomic.hpp"
  #include "runtime/os.hpp"
  #ifdef TARGET_ARCH_x86
...

  inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
    return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
  }

- inline klassOop oopDesc::klass() const {
-   if (UseCompressedOops) {
-     return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
+ inline Klass* oopDesc::klass() const {
+   if (UseCompressedKlassPointers) {
+     return decode_klass_not_null(_metadata._compressed_klass);
    } else {
      return _metadata._klass;
    }
  }

- inline klassOop oopDesc::klass_or_null() const volatile {
+ inline Klass* oopDesc::klass_or_null() const volatile {
    // can be NULL in CMS
-   if (UseCompressedOops) {
-     return (klassOop)decode_heap_oop(_metadata._compressed_klass);
+   if (UseCompressedKlassPointers) {
+     return decode_klass(_metadata._compressed_klass);
    } else {
      return _metadata._klass;
    }
  }

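klass() and klass_or_null() now return Klass* directly and key off UseCompressedKlassPointers rather than UseCompressedOops: the header's _metadata union holds either a raw Klass* or a 32-bit encoding decoded relative to a base and shift. A standalone sketch of that shape (illustrative code, not the HotSpot declarations):

    #include <cstdint>

    struct Klass;                        // opaque here

    union metadata_word {                // mirrors oopDesc::_metadata
      Klass*   _klass;                   // full pointer (compressed klass pointers off)
      uint32_t _compressed_klass;        // narrow encoding (compressed klass pointers on)
    };

    static bool use_compressed_klass_pointers = true;  // stand-in for the VM flag

    Klass* read_klass(const metadata_word& m, char* narrow_base, int narrow_shift) {
      if (use_compressed_klass_pointers) {
        // decode as base + (narrow << shift), like decode_klass_not_null() above
        return reinterpret_cast<Klass*>(
            narrow_base + (uintptr_t(m._compressed_klass) << narrow_shift));
      }
      return m._klass;
    }

    int main() {
      char region[64];                       // stand-in for the decode base region
      metadata_word m;
      m._compressed_klass = 2;               // encoded value
      Klass* k = read_klass(m, region, 3);   // decodes to region + (2 << 3)
      return k == (Klass*)(region + 16) ? 0 : 1;
    }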
  inline int oopDesc::klass_gap_offset_in_bytes() {
-   assert(UseCompressedOops, "only applicable to compressed headers");
+   assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
    return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
  }

- inline oop* oopDesc::klass_addr() {
+ inline Klass** oopDesc::klass_addr() {
    // Only used internally and with CMS and will not work with
    // UseCompressedOops
-   assert(!UseCompressedOops, "only supported with uncompressed oops");
-   return (oop*) &_metadata._klass;
+   assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers");
+   return (Klass**) &_metadata._klass;
  }

  inline narrowOop* oopDesc::compressed_klass_addr() {
-   assert(UseCompressedOops, "only called by compressed oops");
+   assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
    return (narrowOop*) &_metadata._compressed_klass;
  }

- inline void oopDesc::set_klass(klassOop k) {
+ inline void oopDesc::set_klass(Klass* k) {
    // since klasses are promoted no store check is needed
-   assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
-   assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
-   if (UseCompressedOops) {
-     oop_store_without_check(compressed_klass_addr(), (oop)k);
+   assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
+   assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
+   if (UseCompressedKlassPointers) {
+     *compressed_klass_addr() = encode_klass_not_null(k);
    } else {
-     oop_store_without_check(klass_addr(), (oop) k);
+     *klass_addr() = k;
    }
  }

  inline int oopDesc::klass_gap() const {
    return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
  }

  inline void oopDesc::set_klass_gap(int v) {
-   if (UseCompressedOops) {
+   if (UseCompressedKlassPointers) {
      *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
    }
  }

  inline void oopDesc::set_klass_to_list_ptr(oop k) {
    // This is only to be used during GC, for from-space objects, so no
    // barrier is needed.
-   if (UseCompressedOops) {
+   if (UseCompressedKlassPointers) {
      _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
    } else {
-     _metadata._klass = (klassOop)k;
+     _metadata._klass = (Klass*)(address)k;
+   }
+ }
+
+ inline oop oopDesc::list_ptr_from_klass() {
+   // This is only to be used during GC, for from-space objects.
+   if (UseCompressedKlassPointers) {
+     return (oop)decode_heap_oop((oop)(address)_metadata._compressed_klass);
+   } else {
+     // Special case for GC
+     return (oop)(address)_metadata._klass;
    }
  }

  inline void oopDesc::init_mark() { set_mark(markOopDesc::prototype_for_object(this)); }
- inline Klass* oopDesc::blueprint() const { return klass()->klass_part(); }

- inline bool oopDesc::is_a(klassOop k) const { return blueprint()->is_subtype_of(k); }
+ inline bool oopDesc::is_a(Klass* k) const { return klass()->is_subtype_of(k); }

- inline bool oopDesc::is_instance() const { return blueprint()->oop_is_instance(); }
- inline bool oopDesc::is_instanceMirror() const { return blueprint()->oop_is_instanceMirror(); }
- inline bool oopDesc::is_instanceRef() const { return blueprint()->oop_is_instanceRef(); }
- inline bool oopDesc::is_array() const { return blueprint()->oop_is_array(); }
- inline bool oopDesc::is_objArray() const { return blueprint()->oop_is_objArray(); }
- inline bool oopDesc::is_typeArray() const { return blueprint()->oop_is_typeArray(); }
- inline bool oopDesc::is_javaArray() const { return blueprint()->oop_is_javaArray(); }
- inline bool oopDesc::is_klass() const { return blueprint()->oop_is_klass(); }
- inline bool oopDesc::is_thread() const { return blueprint()->oop_is_thread(); }
- inline bool oopDesc::is_method() const { return blueprint()->oop_is_method(); }
- inline bool oopDesc::is_constMethod() const { return blueprint()->oop_is_constMethod(); }
- inline bool oopDesc::is_methodData() const { return blueprint()->oop_is_methodData(); }
- inline bool oopDesc::is_constantPool() const { return blueprint()->oop_is_constantPool(); }
- inline bool oopDesc::is_constantPoolCache() const { return blueprint()->oop_is_constantPoolCache(); }
- inline bool oopDesc::is_compiledICHolder() const { return blueprint()->oop_is_compiledICHolder(); }
+ inline bool oopDesc::is_instance() const { return klass()->oop_is_instance(); }
+ inline bool oopDesc::is_instanceMirror() const { return klass()->oop_is_instanceMirror(); }
+ inline bool oopDesc::is_instanceRef() const { return klass()->oop_is_instanceRef(); }
+ inline bool oopDesc::is_array() const { return klass()->oop_is_array(); }
+ inline bool oopDesc::is_objArray() const { return klass()->oop_is_objArray(); }
+ inline bool oopDesc::is_typeArray() const { return klass()->oop_is_typeArray(); }

  inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }

  template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
+ inline Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }
  inline jbyte* oopDesc::byte_field_addr(int offset) const { return (jbyte*) field_base(offset); }
  inline jchar* oopDesc::char_field_addr(int offset) const { return (jchar*) field_base(offset); }
  inline jboolean* oopDesc::bool_field_addr(int offset) const { return (jboolean*)field_base(offset); }
  inline jint* oopDesc::int_field_addr(int offset) const { return (jint*) field_base(offset); }
  inline jshort* oopDesc::short_field_addr(int offset) const { return (jshort*) field_base(offset); }
...
  // is narrowOop. All functions are overloaded so they can be called by
  // template functions without conditionals (the compiler instantiates via
  // the right type and inlines the appropriate code).

  inline bool oopDesc::is_null(oop obj) { return obj == NULL; }
+ inline bool oopDesc::is_null(Klass* obj) { return obj == NULL; }
  inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

  // Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
  // offset from the heap base. Saving the check for null can save instructions
  // in inner GC loops so these are separated.

  inline bool check_obj_alignment(oop obj) {
+   return (intptr_t)obj % MinObjAlignmentInBytes == 0;
+ }
+ inline bool check_obj_alignment(Klass* obj) {
    return (intptr_t)obj % MinObjAlignmentInBytes == 0;
  }

  inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
    assert(!is_null(v), "oop value can never be zero");
...
  }

  inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
  inline oop oopDesc::decode_heap_oop(oop v) { return v; }

+ // Encoding and decoding for klass field. It is copied code, but someday
+ // might not be the same as oop.
+
+ inline narrowOop oopDesc::encode_klass_not_null(Klass* v) {
+   assert(!is_null(v), "oop value can never be zero");
+   assert(check_obj_alignment(v), "Address not aligned");
+   address base = Universe::narrow_oop_base();
+   int shift = Universe::narrow_oop_shift();
+   uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
+   assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
+   uint64_t result = pd >> shift;
+   assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
+   assert(decode_klass(result) == v, "reversibility");
+   return (narrowOop)result;
+ }
+
+ inline narrowOop oopDesc::encode_klass(Klass* v) {
+   return (is_null(v)) ? (narrowOop)0 : encode_klass_not_null(v);
+ }
+
+ inline Klass* oopDesc::decode_klass_not_null(narrowOop v) {
+   assert(!is_null(v), "narrow oop value can never be zero");
+   address base = Universe::narrow_oop_base();
+   int shift = Universe::narrow_oop_shift();
+   Klass* result = (Klass*)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+   assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
+   return result;
+ }
+
+ inline Klass* oopDesc::decode_klass(narrowOop v) {
+   return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
+ }
+
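The new encode/decode pair stores a 64-bit Klass* as a 32-bit offset from a base, scaled down by the alignment shift, and the "reversibility" assert checks the round trip. A standalone sketch of the same arithmetic with illustrative base and shift values (not HotSpot code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uintptr_t base  = 0x800000000ULL;   // illustrative narrow_oop_base
      int       shift = 3;                // log2 of 8-byte object alignment

      uintptr_t klass_addr = base + 0x12345678ULL;                 // 8-byte aligned
      uint32_t  narrow  = (uint32_t)((klass_addr - base) >> shift);  // encode
      uintptr_t decoded = base + ((uintptr_t)narrow << shift);       // decode

      assert(decoded == klass_addr);      // the "reversibility" assert above
      printf("narrow=0x%x decoded=0x%lx\n", (unsigned)narrow, (unsigned long)decoded);
      return 0;
    }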
  // Load an oop out of the Java heap as is without decoding.
  // Called by GC to check for null before decoding.
  inline oop oopDesc::load_heap_oop(oop* p) { return *p; }
  inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }

...
    } else {
      return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
    }
  }

- inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
-                                                 volatile HeapWord *dest,
-                                                 oop compare_value) {
-   if (UseCompressedOops) {
-     // encode exchange and compare value from oop to T
-     narrowOop val = encode_heap_oop(exchange_value);
-     narrowOop cmp = encode_heap_oop(compare_value);
-
-     narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
-     // decode old from T to oop
-     return decode_heap_oop(old);
-   } else {
-     return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
-   }
- }
-
  // In order to put or get a field out of an instance, must first check
  // if the field has been compressed and uncompress it.
  inline oop oopDesc::obj_field(int offset) const {
    return UseCompressedOops ?
      load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
...
  }
  inline void oopDesc::obj_field_put(int offset, oop value) {
    UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                        oop_store(obj_field_addr<oop>(offset), value);
  }
+
+ inline Metadata* oopDesc::metadata_field(int offset) const {
+   return *metadata_field_addr(offset);
+ }
+
+ inline void oopDesc::metadata_field_put(int offset, Metadata* value) {
+   *metadata_field_addr(offset) = value;
+ }
+
  inline void oopDesc::obj_field_put_raw(int offset, oop value) {
    UseCompressedOops ?
      encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
      encode_store_heap_oop(obj_field_addr<oop>(offset), value);
  }
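The new Metadata accessors are deliberately plain: unlike obj_field_put, which may compress the value and goes through oop_store and its write barrier, a Metadata* points outside the Java heap, so no encoding and no barrier applies. A standalone sketch of the contrast (illustrative, not HotSpot code):

    #include <cstdint>

    struct Metadata {};                  // lives in metaspace, not in the Java heap

    struct instance {
      uint32_t  narrow_oop_field;        // heap reference: compressed, barriered store
      Metadata* metadata_field;          // native pointer: plain, unbarriered store
    };

    int main() {
      static Metadata m;                 // native-memory stand-in
      instance obj{0, nullptr};
      obj.metadata_field = &m;           // mirrors *metadata_field_addr(offset) = value
      return obj.metadata_field == &m ? 0 : 1;
    }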
...
    return s;
  }


  inline int oopDesc::size() {
-   return size_given_klass(blueprint());
- }
-
- inline bool oopDesc::is_parsable() {
-   return blueprint()->oop_is_parsable(this);
- }
-
- inline bool oopDesc::is_conc_safe() {
-   return blueprint()->oop_is_conc_safe(this);
+   return size_given_klass(klass());
  }

  inline void update_barrier_set(void* p, oop v) {
    assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
    oopDesc::bs()->write_ref_field(p, v);
...
    // Used by release_obj_field_put, so use release_store_ptr.
    oopDesc::release_encode_store_heap_oop(p, v);
    update_barrier_set((void*)p, v); // cast away type
  }

- template <class T> inline void oop_store_without_check(T* p, oop v) {
-   // XXX YSR FIX ME!!!
-   if (always_do_update_barrier) {
-     oop_store(p, v);
-   } else {
-     assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
-            "oop store without store check failed");
-     oopDesc::encode_store_heap_oop(p, v);
-   }
- }
-
- // When it absolutely has to get there.
- template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
-   // XXX YSR FIX ME!!!
-   if (always_do_update_barrier) {
-     oop_store(p, v);
-   } else {
-     assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
-            "oop store without store check failed");
-     oopDesc::release_encode_store_heap_oop(p, v);
-   }
- }
-
  // Should replace *addr = oop assignments where addr type depends on UseCompressedOops
  // (without having to remember the function name this calls).
  inline void oop_store_raw(HeapWord* addr, oop value) {
    if (UseCompressedOops) {
      oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
    } else {
      oopDesc::encode_store_heap_oop((oop*)addr, value);
+   }
+ }
+
+ inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
+                                                 volatile HeapWord *dest,
+                                                 oop compare_value,
+                                                 bool prebarrier) {
+   if (UseCompressedOops) {
+     if (prebarrier) {
+       update_barrier_set_pre((narrowOop*)dest, exchange_value);
+     }
+     // encode exchange and compare value from oop to T
+     narrowOop val = encode_heap_oop(exchange_value);
+     narrowOop cmp = encode_heap_oop(compare_value);
+
+     narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
+     // decode old from T to oop
+     return decode_heap_oop(old);
+   } else {
+     if (prebarrier) {
+       update_barrier_set_pre((oop*)dest, exchange_value);
+     }
+     return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
    }
  }

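The relocated atomic_compare_exchange_oop gains a prebarrier flag so callers can run the pre-write barrier (needed by collectors such as G1) before the swap; otherwise the shape is unchanged: encode both values, CAS on the narrow cell, decode the old value on the way out. A standalone sketch of that encode/CAS/decode pattern, minus the barrier (illustrative, not HotSpot code):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static uintptr_t base  = 0x800000000ULL;   // illustrative heap base
    static int       shift = 3;

    static uint32_t  encode(uintptr_t p) { return p ? (uint32_t)((p - base) >> shift) : 0; }
    static uintptr_t decode(uint32_t n)  { return n ? base + ((uintptr_t)n << shift) : 0; }

    int main() {
      std::atomic<uint32_t> field{encode(base + 0x100)};  // compressed "oop" field

      uint32_t cmp = encode(base + 0x100);                // expected value
      uint32_t val = encode(base + 0x200);                // replacement value
      uint32_t old = cmp;
      field.compare_exchange_strong(old, val);            // CAS on the narrow cell

      printf("old=0x%lx now=0x%lx\n",
             (unsigned long)decode(old), (unsigned long)decode(field.load()));
      return 0;
    }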
  // Used only for markSweep, scavenging
  inline bool oopDesc::is_gc_marked() const {
...
  inline bool oopDesc::is_oop(bool ignore_mark_word) const {
    oop obj = (oop) this;
    if (!check_obj_alignment(obj)) return false;
    if (!Universe::heap()->is_in_reserved(obj)) return false;
    // obj is aligned and accessible in heap
-   // try to find metaclass cycle safely without seg faulting on bad input
-   // we should reach klassKlassObj by following klass link at most 3 times
-   for (int i = 0; i < 3; i++) {
-     obj = obj->klass_or_null();
-     // klass should be aligned and in permspace
-     if (!check_obj_alignment(obj)) return false;
-     if (!Universe::heap()->is_in_permanent(obj)) return false;
-   }
-   if (obj != Universe::klassKlassObj()) {
-     // During a dump, the _klassKlassObj moved to a shared space.
-     if (DumpSharedSpaces && Universe::klassKlassObj()->is_shared()) {
-       return true;
-     }
-     return false;
-   }
+   if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

    // Header verification: the mark is typically non-NULL. If we're
    // at a safepoint, it must not be null.
    // Outside of a safepoint, the header could be changing (for example,
    // another thread could be inflating a lock on this object).
...
    if (!Universe::heap()->is_in_reserved(this)) return false;
    return mark()->is_unlocked();
  }
  #endif // PRODUCT

- inline void oopDesc::follow_header() {
-   if (UseCompressedOops) {
-     MarkSweep::mark_and_push(compressed_klass_addr());
-   } else {
-     MarkSweep::mark_and_push(klass_addr());
-   }
- }
-
  inline void oopDesc::follow_contents(void) {
    assert (is_gc_marked(), "should be marked");
-   blueprint()->oop_follow_contents(this);
+   klass()->oop_follow_contents(this);
  }
-

  // Used by scavengers

  inline bool oopDesc::is_forwarded() const {
    // The extra heap check is needed since the obj might be locked, in which case the
...
    } else {
      return slow_identity_hash();
    }
  }

- inline void oopDesc::oop_iterate_header(OopClosure* blk) {
-   if (UseCompressedOops) {
-     blk->do_oop(compressed_klass_addr());
-   } else {
-     blk->do_oop(klass_addr());
-   }
- }
-
- inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
-   if (UseCompressedOops) {
-     if (mr.contains(compressed_klass_addr())) {
-       blk->do_oop(compressed_klass_addr());
-     }
-   } else {
-     if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
-   }
- }
-
  inline int oopDesc::adjust_pointers() {
    debug_only(int check_size = size());
-   int s = blueprint()->oop_adjust_pointers(this);
+   int s = klass()->oop_adjust_pointers(this);
    assert(s == check_size, "should be the same");
    return s;
- }
-
- inline void oopDesc::adjust_header() {
-   if (UseCompressedOops) {
-     MarkSweep::adjust_pointer(compressed_klass_addr());
-   } else {
-     MarkSweep::adjust_pointer(klass_addr());
-   }
  }

  #define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
                                                                         \
  inline int oopDesc::oop_iterate(OopClosureType* blk) {                 \
    SpecializationStats::record_call();                                  \
-   return blueprint()->oop_oop_iterate##nv_suffix(this, blk);           \
+   return klass()->oop_oop_iterate##nv_suffix(this, blk);               \
  }                                                                      \
                                                                         \
  inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {   \
    SpecializationStats::record_call();                                  \
-   return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);   \
+   return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);       \
+ }
+
+
+ inline int oopDesc::oop_iterate_no_header(OopClosure* blk) {
+   // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
+   // the do_oop calls, but turns off all other features in ExtendedOopClosure.
+   NoHeaderExtendedOopClosure cl(blk);
+   return oop_iterate(&cl);
+ }
+
+ inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
+   NoHeaderExtendedOopClosure cl(blk);
+   return oop_iterate(&cl, mr);
  }

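oop_iterate_no_header replaces the deleted oop_iterate_header: now that the klass slot holds a Klass* instead of a heap oop, callers that only want an object's reference fields wrap their plain OopClosure in the proxying NoHeaderExtendedOopClosure named in the comment above. A standalone sketch of that adapter shape (illustrative names, not the HotSpot classes):

    #include <cstdio>

    struct OopClosure {                       // minimal per-reference callback
      virtual void do_oop(void** p) = 0;
    };

    struct ExtendedOopClosure : OopClosure {  // what the iterators traffic in
      virtual bool do_metadata() { return false; }   // extra feature, off here
    };

    struct NoHeaderAdapter : ExtendedOopClosure {
      OopClosure* _cl;
      explicit NoHeaderAdapter(OopClosure* cl) : _cl(cl) {}
      void do_oop(void** p) override { _cl->do_oop(p); }  // pure proxy
    };

    struct CountClosure : OopClosure {
      int count = 0;
      void do_oop(void**) override { count++; }
    };

    int main() {
      CountClosure counter;
      NoHeaderAdapter adapted(&counter);
      void* slot = nullptr;
      adapted.do_oop(&slot);                  // an iterator would call this per field
      printf("visited %d\n", counter.count);
      return 0;
    }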
  ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
  ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)

  #ifndef SERIALGC
  #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)            \
                                                                           \
  inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {         \
    SpecializationStats::record_call();                                    \
-   return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk);   \
+   return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);       \
  }

  ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
  ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
  #endif // !SERIALGC

- inline bool oopDesc::is_shared() const {
-   return CompactingPermGenGen::is_shared(this);
- }
-
- inline bool oopDesc::is_shared_readonly() const {
-   return CompactingPermGenGen::is_shared_readonly(this);
- }
-
- inline bool oopDesc::is_shared_readwrite() const {
-   return CompactingPermGenGen::is_shared_readwrite(this);
- }
-
  #endif // SHARE_VM_OOPS_OOP_INLINE_HPP