comparison src/share/vm/oops/cpCache.hpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents src/share/vm/oops/cpCacheOop.hpp@1d7922586cf6
children 4bfe8b33cf66
1 /*
2 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_OOPS_CPCACHEOOP_HPP
26 #define SHARE_VM_OOPS_CPCACHEOOP_HPP
27
28 #include "interpreter/bytecodes.hpp"
29 #include "memory/allocation.hpp"
30 #include "utilities/array.hpp"
31
32 class PSPromotionManager;
33
34 // A ConstantPoolCacheEntry describes an individual entry of the constant
35 // pool cache. There are two principal kinds of entries: field entries for
36 // instance & static field access, and method entries for invokes. Some of
37 // the entry layout is shared and looks as follows:
38 //
39 // bit number |31                0|
40 // bit length |-8--|-8--|---16----|
41 // --------------------------------
42 // _indices   [ b2 | b1 |  index  ]  index = constant_pool_index
43 // _f1        [  entry specific   ]  metadata ptr (method or klass)
44 // _f2        [  entry specific   ]  vtable or res_ref index, or vfinal method ptr
45 // _flags     [tos|0|F=1|0|0|f|v|0 |00000|field_index] (for field entries)
46 // bit length [ 4 |1| 1 |1|1|1|1|1 |--5--|----16-----]
47 // _flags     [tos|0|F=0|A|I|f|0|vf|00000|00000|psize] (for method entries)
48 // bit length [ 4 |1| 1 |1|1|1|1|1 |--5--|--8--|--8--]
49
50 // --------------------------------
51 //
52 // with:
53 // index = original constant pool index
54 // b1 = bytecode 1
55 // b2 = bytecode 2
56 // psize = parameters size (method entries only)
57 // field_index = index into field information in holder InstanceKlass
58 // The index max is 0xffff (max number of fields in constant pool)
59 // and is multiplied by (InstanceKlass::next_offset) when accessing.
60 // tos = TosState
61 // F = the entry is for a field (or F=0 for a method)
62 // A = call site has an appendix argument (loaded from resolved references)
63 // I = interface call is forced virtual (must use a vtable index or vfinal)
64 // f = field or method is final
65 // v = field is volatile
66 // vf = virtual but final (method entries only: is_vfinal())
67 //
68 // The flags after TosState have the following interpretation:
69 // F flag (bit 26): 1 for field entries, 0 for method entries
70 // f flag: true if the field or method is final
71 // v flag: true if the field is volatile (field entries only)
72 // vf flag: true if _f2 holds a final Method* rather than a vtable index (method entries only: is_vfinal())
73 // I flag: true if an invokeinterface call is forced to act as a virtual call (e.g., the method resolves in class Object)
74 //
75 // Bits 31, 30, 29, and 28 together form a 4-bit number in the range 0 to 8, with
76 // the following mapping to the TosState states:
77 //
78 // btos: 0
79 // ctos: 1
80 // stos: 2
81 // itos: 3
82 // ltos: 4
83 // ftos: 5
84 // dtos: 6
85 // atos: 7
86 // vtos: 8
87 //
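// For example, a _flags word whose top four bits are 0111 (7) reports atos, i.e., the
// entry describes an object-typed field or an object-returning method.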
88 // Entry specific: field entries:
89 // _indices = get (b1 section) and put (b2 section) bytecodes, original constant pool index
90 // _f1 = field holder (a Klass*, i.e., a metadata pointer, not the java.lang.Class mirror)
91 // _f2 = field offset in bytes
92 // _flags = field type information, original FieldInfo index in field holder
93 // (field_index section)
94 //
95 // Entry specific: method entries:
96 // _indices = invoke code for f1 (b1 section), invoke code for f2 (b2 section),
97 // original constant pool index
98 // _f1 = Method* for non-virtual calls, unused by virtual calls.
99 // for interface calls, which are essentially virtual but need a klass,
100 // contains Klass* for the corresponding interface.
101 // for invokedynamic and invokehandle, f1 contains the adapter (invoker) Method*;
102 // the site-specific appendix (a CallSite or MethodType, if any) lives in the
103 // resolved references array at the index recorded in f2.
104 // _f2 = vtable/itable index (or final Method*) for virtual calls,
105 // resolved references index for invokedynamic/invokehandle, unused otherwise.
106 // The is_vfinal flag indicates _f2 is a Method* for a final method, not an index.
107 // _flags = method return type info (tos section),
108 // virtual final bit (vfinal),
109 // parameter size (psize section)
110 //
111 // Note: invokevirtual & invokespecial bytecodes can share the same constant
112 // pool entry and thus the same constant pool cache entry. All invoke
113 // bytecodes but invokevirtual use only _f1 and the corresponding b1
114 // bytecode, while invokevirtual uses only _f2 and the corresponding
115 // b2 bytecode. The value of _flags is shared for both types of entries.
116 //
117 // The fields are volatile so that they are stored in the order written in the
118 // source code. The _indices field with the bytecode must be written last.
119
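// A minimal, self-contained sketch of how the _flags layout documented above packs
// together. It assumes the same bit positions as the ConstantPoolCacheEntry enum
// below (tos state in bits 28..31, the field/method bit F at 26, f at 23, v at 22,
// field index in the low 16 bits); the helper names are invented for illustration
// and are not part of this class.
inline int example_pack_field_flags(int tos_state, bool is_final, bool is_volatile, int field_index) {
  int flags = (tos_state & 0xF) << 28;      // TosState of the field type
  flags |= (1 << 26);                       // F = 1: this is a field entry
  if (is_final)    flags |= (1 << 23);      // f: field is final
  if (is_volatile) flags |= (1 << 22);      // v: field is volatile
  return flags | (field_index & 0xFFFF);    // low 16 bits: FieldInfo index in the holder
}
inline int example_unpack_field_index(int flags) { return flags & 0xFFFF; }
inline int example_unpack_tos_state(int flags)   { return (flags >> 28) & 0xF; }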
120 class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
121 friend class VMStructs;
122 friend class constantPoolCacheKlass;
123 friend class ConstantPool;
124 friend class InterpreterRuntime;
125
126 private:
127 volatile intx _indices; // constant pool index & rewrite bytecodes
128 volatile Metadata* _f1; // entry specific metadata field
129 volatile intx _f2; // entry specific int/metadata field
130 volatile intx _flags; // flags
131
132
133 void set_bytecode_1(Bytecodes::Code code);
134 void set_bytecode_2(Bytecodes::Code code);
135 void set_f1(Metadata* f1) {
136 Metadata* existing_f1 = (Metadata*)_f1; // read once
137 assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change");
138 _f1 = f1;
139 }
140 void release_set_f1(Metadata* f1);
141 void set_f2(intx f2) { assert(_f2 == 0 || _f2 == f2, "illegal field change"); _f2 = f2; }
142 void set_f2_as_vfinal_method(Method* f2) { assert(_f2 == 0 || _f2 == (intptr_t) f2, "illegal field change"); assert(is_vfinal(), "flags must be set"); _f2 = (intptr_t) f2; }
143 int make_flags(TosState state, int option_bits, int field_index_or_method_params);
144 void set_flags(intx flags) { _flags = flags; }
145 bool init_flags_atomic(intx flags);
146 void set_field_flags(TosState field_type, int option_bits, int field_index) {
147 assert((field_index & field_index_mask) == field_index, "field_index in range");
148 set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index));
149 }
150 void set_method_flags(TosState return_type, int option_bits, int method_params) {
151 assert((method_params & parameter_size_mask) == method_params, "method_params in range");
152 set_flags(make_flags(return_type, option_bits, method_params));
153 }
154 bool init_method_flags_atomic(TosState return_type, int option_bits, int method_params) {
155 assert((method_params & parameter_size_mask) == method_params, "method_params in range");
156 return init_flags_atomic(make_flags(return_type, option_bits, method_params));
157 }
158
159 public:
160 // specific bit definitions for the flags field:
161 // (Note: the interpreter must use these definitions to access the CP cache.)
162 enum {
163 // high order bits are the TosState corresponding to field type or method return type
164 tos_state_bits = 4,
165 tos_state_mask = right_n_bits(tos_state_bits),
166 tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below
167 // misc. option bits; can be any bit position in [16..27]
168 is_field_entry_shift = 26, // (F) is it a field or a method?
169 has_appendix_shift = 25, // (A) does the call site have an appendix argument?
170 is_forced_virtual_shift = 24, // (I) is the interface reference forced to virtual mode?
171 is_final_shift = 23, // (f) is the field or method final?
172 is_volatile_shift = 22, // (v) is the field volatile?
173 is_vfinal_shift = 21, // (vf) did the call resolve to a final method?
174 // low order bits give field index (for FieldInfo) or method parameter size:
175 field_index_bits = 16,
176 field_index_mask = right_n_bits(field_index_bits),
177 parameter_size_bits = 8, // subset of field_index_mask, range is 0..255
178 parameter_size_mask = right_n_bits(parameter_size_bits),
179 option_bits_mask = ~(((-1) << tos_state_shift) | (field_index_mask | parameter_size_mask))
180 };
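// Worked example: with BitsPerInt == 32 and tos_state_bits == 4, tos_state_shift is 28,
// so the TosState occupies bits 28..31; option_bits_mask then evaluates to 0x0FFF0000,
// i.e., the option bits live in bits 16..27, between the field index and the TosState.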
181
182 // specific bit definitions for the indices field:
183 enum {
184 cp_index_bits = 2*BitsPerByte,
185 cp_index_mask = right_n_bits(cp_index_bits),
186 bytecode_1_shift = cp_index_bits,
187 bytecode_1_mask = right_n_bits(BitsPerByte), // == (u1)0xFF
188 bytecode_2_shift = cp_index_bits + BitsPerByte,
189 bytecode_2_mask = right_n_bits(BitsPerByte) // == (u1)0xFF
190 };
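// Worked example: with cp_index_bits == 16, the packed _indices word reads as
// (bytecode_2 << 24) | (bytecode_1 << 16) | constant_pool_index, matching the
// b2 / b1 / index picture at the top of this file.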
191
192
193 // Initialization
194 void initialize_entry(int original_index); // initialize primary entry
195 void initialize_resolved_reference_index(int ref_index) {
196 assert(_f2 == 0, "set once"); // note: ref_index might be zero also
197 _f2 = ref_index;
198 }
199
200 void set_field( // sets entry to resolved field state
201 Bytecodes::Code get_code, // the bytecode used for reading the field
202 Bytecodes::Code put_code, // the bytecode used for writing the field
203 KlassHandle field_holder, // the object/klass holding the field
204 int orig_field_index, // the original field index in the field holder
205 int field_offset, // the field offset in words in the field holder
206 TosState field_type, // the (machine) field type
207 bool is_final, // the field is final
208 bool is_volatile, // the field is volatile
209 Klass* root_klass // needed by the GC to dirty the klass
210 );
211
212 void set_method( // sets entry to resolved method entry
213 Bytecodes::Code invoke_code, // the bytecode used for invoking the method
214 methodHandle method, // the method/prototype if any (NULL, otherwise)
215 int vtable_index // the vtable index if any, else negative
216 );
217
218 void set_interface_call(
219 methodHandle method, // Resolved method
220 int index // Method index into interface
221 );
222
223 void set_method_handle(
224 methodHandle method, // adapter for invokeExact, etc.
225 Handle appendix, // stored in refs[f2]; could be a java.lang.invoke.MethodType
226 objArrayHandle resolved_references
227 );
228
229 void set_dynamic_call(
230 methodHandle method, // adapter for this call site
231 Handle appendix, // stored in refs[f2]; could be a java.lang.invoke.CallSite
232 objArrayHandle resolved_references
233 );
234
235 // Common code for invokedynamic and MH invocations.
236
237 // The "appendix" is an optional call-site-specific parameter which is
238 // pushed by the JVM at the end of the argument list. This argument may
239 // be a MethodType for the MH.invokes and a CallSite for an invokedynamic
240 // instruction. However, its exact type and use depends on the Java upcall,
241 // which simply returns a compiled LambdaForm along with any reference
242 // that LambdaForm needs to complete the call. If the upcall returns a
243 // null appendix, the argument is not passed at all.
244 //
245 // The appendix is *not* represented in the signature of the symbolic
246 // reference for the call site, but (if present) it *is* represented in
247 // the Method* bound to the site. This means that static and dynamic
248 // resolution logic needs to make slightly different assessments about the
249 // number and types of arguments.
250 void set_method_handle_common(
251 Bytecodes::Code invoke_code, // _invokehandle or _invokedynamic
252 methodHandle adapter, // invoker method (f1)
253 Handle appendix, // appendix such as CallSite, MethodType, etc. (refs[f2])
254 objArrayHandle resolved_references
255 );
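// For example, when the upcall for an invokedynamic site returns a non-null appendix,
// the adapter recorded in f1 declares one trailing parameter beyond the symbolic
// reference's signature; at invocation the JVM pushes refs[f2] (the CallSite or
// MethodType appendix) in that position, and pushes nothing when the appendix is null.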
256
257 Method* method_if_resolved(constantPoolHandle cpool);
258 oop appendix_if_resolved(constantPoolHandle cpool);
259
260 void set_parameter_size(int value);
261
262 // Which bytecode number (1 or 2) in the index field is valid for this bytecode?
263 // Returns -1 if neither is valid.
264 static int bytecode_number(Bytecodes::Code code) {
265 switch (code) {
266 case Bytecodes::_getstatic : // fall through
267 case Bytecodes::_getfield : // fall through
268 case Bytecodes::_invokespecial : // fall through
269 case Bytecodes::_invokestatic : // fall through
270 case Bytecodes::_invokeinterface : return 1;
271 case Bytecodes::_putstatic : // fall through
272 case Bytecodes::_putfield : // fall through
273 case Bytecodes::_invokehandle : // fall through
274 case Bytecodes::_invokedynamic : // fall through
275 case Bytecodes::_invokevirtual : return 2;
276 default : break;
277 }
278 return -1;
279 }
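// For example, bytecode_number(Bytecodes::_getfield) is 1 and
// bytecode_number(Bytecodes::_putfield) is 2, so a get/put pair rewritten to the same
// entry records its resolved state in bytecode 1 and bytecode 2 respectively.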
280
281 // Has this bytecode been resolved? Only valid for invokes and get/put field/static.
282 bool is_resolved(Bytecodes::Code code) const {
283 switch (bytecode_number(code)) {
284 case 1: return (bytecode_1() == code);
285 case 2: return (bytecode_2() == code);
286 }
287 return false; // default: not resolved
288 }
289
290 // Accessors
291 int indices() const { return _indices; }
292 int constant_pool_index() const { return (indices() & cp_index_mask); }
293 Bytecodes::Code bytecode_1() const { return Bytecodes::cast((indices() >> bytecode_1_shift) & bytecode_1_mask); }
294 Bytecodes::Code bytecode_2() const { return Bytecodes::cast((indices() >> bytecode_2_shift) & bytecode_2_mask); }
295 Method* f1_as_method() const { Metadata* f1 = (Metadata*)_f1; assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
296 Klass* f1_as_klass() const { Metadata* f1 = (Metadata*)_f1; assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
297 bool is_f1_null() const { Metadata* f1 = (Metadata*)_f1; return f1 == NULL; } // classifies a CPC entry as unbound
298 int f2_as_index() const { assert(!is_vfinal(), ""); return (int) _f2; }
299 Method* f2_as_vfinal_method() const { assert(is_vfinal(), ""); return (Method*)_f2; }
300 int field_index() const { assert(is_field_entry(), ""); return (_flags & field_index_mask); }
301 int parameter_size() const { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
302 bool is_volatile() const { return (_flags & (1 << is_volatile_shift)) != 0; }
303 bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; }
304 bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
305 bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; }
306 bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; }
307 bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; }
308 bool is_field_entry() const { return (_flags & (1 << is_field_entry_shift)) != 0; }
309 bool is_byte() const { return flag_state() == btos; }
310 bool is_char() const { return flag_state() == ctos; }
311 bool is_short() const { return flag_state() == stos; }
312 bool is_int() const { return flag_state() == itos; }
313 bool is_long() const { return flag_state() == ltos; }
314 bool is_float() const { return flag_state() == ftos; }
315 bool is_double() const { return flag_state() == dtos; }
316 bool is_object() const { return flag_state() == atos; }
317 TosState flag_state() const { assert((uint)number_of_states <= (uint)tos_state_mask+1, "");
318 return (TosState)((_flags >> tos_state_shift) & tos_state_mask); }
319
320 // Code generation support
321 static WordSize size() { return in_WordSize(sizeof(ConstantPoolCacheEntry) / HeapWordSize); }
322 static ByteSize size_in_bytes() { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
323 static ByteSize indices_offset() { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
324 static ByteSize f1_offset() { return byte_offset_of(ConstantPoolCacheEntry, _f1); }
325 static ByteSize f2_offset() { return byte_offset_of(ConstantPoolCacheEntry, _f2); }
326 static ByteSize flags_offset() { return byte_offset_of(ConstantPoolCacheEntry, _flags); }
327
328 // RedefineClasses() API support:
329 // If this constantPoolCacheEntry refers to old_method then update it
330 // to refer to new_method.
331 // trace_name_printed is set to true if the current call has
332 // printed the klass name so that other routines in the adjust_*
333 // group don't print the klass name.
334 bool adjust_method_entry(Method* old_method, Method* new_method,
335 bool * trace_name_printed);
336 NOT_PRODUCT(bool check_no_old_entries();)
337 bool is_interesting_method_entry(Klass* k);
338
339 // Debugging & Printing
340 void print (outputStream* st, int index) const;
341 void verify(outputStream* st) const;
342
343 static void verify_tos_state_shift() {
344 // When shifting flags as a 32-bit int, make sure we don't need an extra mask for tos_state:
345 assert((((u4)-1 >> tos_state_shift) & ~tos_state_mask) == 0, "no need for tos_state mask");
346 }
347 };
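// A minimal sketch of how a consumer of ConstantPoolCacheEntry (e.g., interpreter
// runtime code) might read back a resolved invokevirtual entry, using only accessors
// declared above; the function name is invented for illustration and the vtable
// lookup itself is elided.
inline Method* example_resolved_virtual_callee(ConstantPoolCacheEntry* e) {
  assert(e->is_resolved(Bytecodes::_invokevirtual), "entry must be resolved first");
  if (e->is_vfinal()) {
    // Statically bound: _f2 holds the final Method* directly.
    return e->f2_as_vfinal_method();
  }
  // Otherwise _f2 holds a vtable index; the receiver's klass supplies the Method*.
  int vtable_index = e->f2_as_index();
  (void)vtable_index;   // the vtable lookup through the receiver is omitted here
  return NULL;
}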
348
349
350 // A constant pool cache is a runtime data structure associated with a constant pool. The cache
351 // holds interpreter runtime information for all field access and invoke bytecodes. The cache
352 // is created and initialized before a class is actively used (i.e., initialized); the
353 // individual cache entries are filled in at resolution (i.e., "link") time (see also: rewriter.*).
354
355 class ConstantPoolCache: public MetaspaceObj {
356 friend class VMStructs;
357 friend class MetadataFactory;
358 private:
359 int _length;
360 ConstantPool* _constant_pool; // the corresponding constant pool
361
362 // Sizing
363 debug_only(friend class ClassVerifier;)
364
365 // Constructor
366 ConstantPoolCache(int length) : _length(length), _constant_pool(NULL) {
367 for (int i = 0; i < length; i++) {
368 assert(entry_at(i)->is_f1_null(), "Failed to clear?");
369 }
370 }
371
372 public:
373 static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length, TRAPS);
374 bool is_constantPoolCache() const { return true; }
375
376 int length() const { return _length; }
377 private:
378 void set_length(int length) { _length = length; }
379
380 static int header_size() { return sizeof(ConstantPoolCache) / HeapWordSize; }
381 static int size(int length) { return align_object_size(header_size() + length * in_words(ConstantPoolCacheEntry::size())); }
382 public:
383 int size() const { return size(length()); }
384 private:
385
386 // Helpers
387 ConstantPool** constant_pool_addr() { return &_constant_pool; }
388 ConstantPoolCacheEntry* base() const { return (ConstantPoolCacheEntry*)((address)this + in_bytes(base_offset())); }
389
390 friend class constantPoolCacheKlass;
391 friend class ConstantPoolCacheEntry;
392
393 public:
394 // Initialization
395 void initialize(intArray& inverse_index_map, intArray& invokedynamic_references_map);
396
397 // Accessors
398 void set_constant_pool(ConstantPool* pool) { _constant_pool = pool; }
399 ConstantPool* constant_pool() const { return _constant_pool; }
400 // Fetches the entry at the given index.
401 // The index must not be encoded or byte-swapped in any way.
402 ConstantPoolCacheEntry* entry_at(int i) const {
403 assert(0 <= i && i < length(), "index out of bounds");
404 return base() + i;
405 }
406
407 // Code generation
408 static ByteSize base_offset() { return in_ByteSize(sizeof(ConstantPoolCache)); }
409 static ByteSize entry_offset(int raw_index) {
410 int index = raw_index;
411 return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index);
412 }
413
414 // RedefineClasses() API support:
415 // If any entry of this constantPoolCache points to any of
416 // old_methods, replace it with the corresponding new_method.
417 // trace_name_printed is set to true if the current call has
418 // printed the klass name so that other routines in the adjust_*
419 // group don't print the klass name.
420 void adjust_method_entries(Method** old_methods, Method** new_methods,
421 int methods_length, bool * trace_name_printed);
422 NOT_PRODUCT(bool check_no_old_entries();)
423
424 // Deallocate - no fields to deallocate
425 DEBUG_ONLY(bool on_stack() { return false; })
426 void deallocate_contents(ClassLoaderData* data) {}
427 bool is_klass() const { return false; }
428
429 // Printing
430 void print_on(outputStream* st) const;
431 void print_value_on(outputStream* st) const;
432
433 const char* internal_name() const { return "{constant pool cache}"; }
434
435 // Verify
436 void verify_on(outputStream* st);
437 };
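// A minimal sketch of the address arithmetic implied by base_offset(), entry_at()
// and size(int) above: a cache is laid out as the ConstantPoolCache header
// immediately followed by 'length' ConstantPoolCacheEntry records. The helper name
// is invented for illustration; entry_at() is the real accessor.
inline ConstantPoolCacheEntry* example_entry_address(ConstantPoolCache* cache, int i) {
  assert(0 <= i && i < cache->length(), "index out of bounds");
  address first_entry = (address)cache + in_bytes(ConstantPoolCache::base_offset());
  return (ConstantPoolCacheEntry*)(first_entry + i * in_bytes(ConstantPoolCacheEntry::size_in_bytes()));
}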
438
439 #endif // SHARE_VM_OOPS_CPCACHEOOP_HPP