src/share/vm/oops/method.hpp @ 6948:e522a00b91aa

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/ after NPG - C++ build works
author Doug Simon <doug.simon@oracle.com>
date Mon, 12 Nov 2012 23:14:12 +0100
parents src/share/vm/oops/methodOop.hpp@957c266d8bc5 src/share/vm/oops/methodOop.hpp@18fb7da42534
children 41938af2b3d8
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_METHODOOP_HPP
#define SHARE_VM_OOPS_METHODOOP_HPP

#include "classfile/vmSymbols.hpp"
#include "code/compressedStream.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/growableArray.hpp"

// A Method* represents a Java method.
//
// Memory layout (each line represents a word). Note that most applications load thousands of methods,
// so keeping the size of this structure small has a big impact on footprint.
//
// We put all oops and method_size first for better GC cache locality.
//
// The actual bytecodes are inlined after the end of the Method struct.
//
// There are bits in the access_flags telling whether inlined tables are present.
// Note that accessing the line number and local variable tables is not performance critical at all.
// Accessing the checked exceptions table is used by reflection, so we put that last to make access
// to it fast.
//
// The line number table is compressed and inlined following the byte codes. It is found as the first
// byte following the byte codes. The checked exceptions table and the local variable table are inlined
// after the line number table, and indexed from the end of the method. We do not compress the checked
// exceptions table since the average length is less than 2, and do not bother to compress the local
// variable table either since it is mostly absent.
//
// Note that native_function and signature_handler have to be at fixed offsets (required by the interpreter)
//
// |------------------------------------------------------|
// | header                                               |
// | klass                                                |
// |------------------------------------------------------|
// | ConstMethod*                  (oop)                  |
// |------------------------------------------------------|
// | methodData                    (oop)                  |
// | interp_invocation_count                              |
// |------------------------------------------------------|
// | access_flags                                         |
// | vtable_index                                         |
// |------------------------------------------------------|
// | result_index (C++ interpreter only)                  |
// |------------------------------------------------------|
// | method_size                   | max_stack            |
// | max_locals                    | size_of_parameters   |
// |------------------------------------------------------|
// | intrinsic_id  |  flags        | throwout_count       |
// |------------------------------------------------------|
// | num_breakpoints               | (unused)             |
// |------------------------------------------------------|
// | invocation_counter                                   |
// | backedge_counter                                     |
// |------------------------------------------------------|
// | prev_time (tiered only, 64 bit wide)                 |
// |                                                      |
// |------------------------------------------------------|
// | rate (tiered)                                        |
// |------------------------------------------------------|
// | code                          (pointer)              |
// | i2i                           (pointer)              |
// | adapter                       (pointer)              |
// | from_compiled_entry           (pointer)              |
// | from_interpreted_entry        (pointer)              |
// |------------------------------------------------------|
// | native_function      (present only if native)        |
// | signature_handler    (present only if native)        |
// |------------------------------------------------------|

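// Illustrative sketch (not part of this header): because the native entry
// points occupy the two words directly following the Method object, they can
// be reached by pointer arithmetic off 'this'. This mirrors the private
// accessors native_function_addr()/signature_handler_addr() declared at the
// bottom of the class; the variable name 'm' is hypothetical.
//
//   Method*  m = ...;
//   address* native_function_slot   = (address*) (m + 1);       // first word past the object
//   address* signature_handler_slot = native_function_slot + 1; // second word past the object
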
class CheckedExceptionElement;
class LocalVariableTableElement;
class AdapterHandlerEntry;
class MethodData;
class ConstMethod;

class Method : public Metadata {
 friend class VMStructs;
 private:
  ConstMethod*      _constMethod;                // Method read-only data.
  MethodData*       _method_data;
  int               _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
  AccessFlags       _access_flags;               // Access flags
  int               _vtable_index;               // vtable index of this method (see VtableIndexFlag)
                                                 // note: can have vtables with >2**16 elements (because of inheritance)
#ifdef CC_INTERP
  int               _result_index;               // C++ interpreter needs for converting results to/from stack
#endif
  u2                _method_size;                // size of this object
  u2                _max_stack;                  // Maximum number of entries on the expression stack
  u2                _max_locals;                 // Number of local variables used by this method
  u2                _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
  u1                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
  u1                _jfr_towrite  : 1,           // Flags
                    _force_inline : 1,
                    _hidden       : 1,
                    _dont_inline  : 1,
                                  : 4;
  u2                _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
  u2                _number_of_breakpoints;      // fullspeed debugging support
  InvocationCounter _invocation_counter;         // Incremented before each activation of the method - used to trigger frequency-based optimizations
  InvocationCounter _backedge_counter;           // Incremented before each backedge taken - used to trigger frequency-based optimizations

#ifdef GRAAL
  oop               _graal_mirror;               // com/oracle/graal/hotspot/HotSpotResolvedJavaMethod mirroring this method
  jlong             _graal_invocation_time;
  int               _graal_priority;
#endif
#ifdef TIERED
  jlong             _prev_time;                  // Previous time the rate was acquired
  float             _rate;                       // Events (invocation and backedge counter increments) per millisecond
#endif

#ifndef PRODUCT
  int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
#endif
  // Entry point for calling both from and to the interpreter.
  address _i2i_entry;           // All-args-on-stack calling convention
  // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
  AdapterHandlerEntry* _adapter;
  // Entry point for calling from compiled code, to compiled code if it exists
  // or else the interpreter.
  volatile address _from_compiled_entry;         // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
  // The entry point for calling both from and to compiled code is
  // "_code->entry_point()". Because of tiered compilation and de-opt, this
  // field can come and go.  It can transition from NULL to not-null at any
  // time (whenever a compile completes).  It can transition from not-null to
  // NULL only at safepoints (because of a de-opt).
  nmethod* volatile _code;                       // Points to the corresponding piece of native code
  volatile address  _from_interpreted_entry;     // Cache of _code ? _adapter->i2c_entry() : _i2i_entry

  // Constructor
  Method(ConstMethod* xconst, AccessFlags access_flags, int size);
 public:

  static Method* allocate(ClassLoaderData* loader_data,
                          int byte_code_size,
                          AccessFlags access_flags,
                          int compressed_line_number_size,
                          int localvariable_table_length,
                          int exception_table_length,
                          int checked_exceptions_length,
                          ConstMethod::MethodType method_type,
                          TRAPS);

  Method() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); }

  // The Method vtable is restored by this call when the Method is in the
  // shared archive.  See patch_klass_vtables() in metaspaceShared.cpp for
  // all the gory details.  SA, dtrace and pstack helpers distinguish metadata
  // by their vtable.
  void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
  bool is_method() const volatile { return true; }

  // accessors for instance variables

  ConstMethod* constMethod() const               { return _constMethod; }
  void set_constMethod(ConstMethod* xconst)      { _constMethod = xconst; }


  static address make_adapters(methodHandle mh, TRAPS);
  volatile address from_compiled_entry() const   { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
  volatile address from_interpreted_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }

  // access flag
  AccessFlags access_flags() const               { return _access_flags; }
  void set_access_flags(AccessFlags flags)       { _access_flags = flags; }

  // name
  Symbol* name() const                           { return constants()->symbol_at(name_index()); }
  int name_index() const                         { return constMethod()->name_index(); }
  void set_name_index(int index)                 { constMethod()->set_name_index(index); }

  // signature
  Symbol* signature() const                      { return constants()->symbol_at(signature_index()); }
  int signature_index() const                    { return constMethod()->signature_index(); }
  void set_signature_index(int index)            { constMethod()->set_signature_index(index); }

  // generics support
  Symbol* generic_signature() const              { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
  int generic_signature_index() const            { return constMethod()->generic_signature_index(); }
  void set_generic_signature_index(int index)    { constMethod()->set_generic_signature_index(index); }

  // annotations support
  AnnotationArray* annotations() const {
    InstanceKlass* ik = method_holder();
    if (ik->annotations() == NULL) {
      return NULL;
    }
    return ik->annotations()->get_method_annotations_of(method_idnum());
  }
  AnnotationArray* parameter_annotations() const {
    InstanceKlass* ik = method_holder();
    if (ik->annotations() == NULL) {
      return NULL;
    }
    return ik->annotations()->get_method_parameter_annotations_of(method_idnum());
  }
  AnnotationArray* annotation_default() const {
    InstanceKlass* ik = method_holder();
    if (ik->annotations() == NULL) {
      return NULL;
    }
    return ik->annotations()->get_method_default_annotations_of(method_idnum());
  }

#ifdef CC_INTERP
  void set_result_index(BasicType type);
  int  result_index()                            { return _result_index; }
#endif

  // Helper routine: get klass name + "." + method name + signature as
  // C string, for the purpose of providing more useful NoSuchMethodErrors
  // and fatal error handling. The string is allocated in resource
  // area if a buffer is not provided by the caller.
  char* name_and_sig_as_C_string() const;
  char* name_and_sig_as_C_string(char* buf, int size) const;

  // Static routine in the situations we don't have a Method*
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);

  Bytecodes::Code java_code_at(int bci) const {
    return Bytecodes::java_code_at(this, bcp_from(bci));
  }
  Bytecodes::Code code_at(int bci) const {
    return Bytecodes::code_at(this, bcp_from(bci));
  }

  // JVMTI breakpoints
  Bytecodes::Code orig_bytecode_at(int bci) const;
  void set_orig_bytecode_at(int bci, Bytecodes::Code code);
  void set_breakpoint(int bci);
  void clear_breakpoint(int bci);
  void clear_all_breakpoints();
  // Tracking number of breakpoints, for fullspeed debugging.
  // Only mutated by VM thread.
  u2   number_of_breakpoints() const             { return _number_of_breakpoints; }
  void incr_number_of_breakpoints()              { ++_number_of_breakpoints; }
  void decr_number_of_breakpoints()              { --_number_of_breakpoints; }
  // Initialization only
  void clear_number_of_breakpoints()             { _number_of_breakpoints = 0; }

  // index into InstanceKlass methods() array
  // note: also used by jfr
  u2 method_idnum() const                        { return constMethod()->method_idnum(); }
  void set_method_idnum(u2 idnum)                { constMethod()->set_method_idnum(idnum); }

  // code size
  int code_size() const                          { return constMethod()->code_size(); }

  // method size
  int method_size() const                        { return _method_size; }
  void set_method_size(int size) {
    assert(0 <= size && size < (1 << 16), "invalid method size");
    _method_size = size;
  }

  // constant pool for Klass* holding this method
  ConstantPool* constants() const                { return constMethod()->constants(); }
  void set_constants(ConstantPool* c)            { constMethod()->set_constants(c); }

  // max stack
  // return original max stack size for method verification
  int  verifier_max_stack() const                { return _max_stack; }
  int  max_stack() const                         { return _max_stack + extra_stack_entries(); }
  void set_max_stack(int size)                   { _max_stack = size; }

  // max locals
  int  max_locals() const                        { return _max_locals; }
  void set_max_locals(int size)                  { _max_locals = size; }

  int highest_comp_level() const;
  void set_highest_comp_level(int level);
  int highest_osr_comp_level() const;
  void set_highest_osr_comp_level(int level);

  // Count of times method was exited via exception while interpreting
  void interpreter_throwout_increment() {
    if (_interpreter_throwout_count < 65534) {
      _interpreter_throwout_count++;
    }
  }

  int  interpreter_throwout_count() const        { return _interpreter_throwout_count; }
  void set_interpreter_throwout_count(int count) { _interpreter_throwout_count = count; }

  // size of parameters
  int size_of_parameters() const                 { return _size_of_parameters; }

  bool has_stackmap_table() const {
    return constMethod()->has_stackmap_table();
  }

  Array<u1>* stackmap_data() const {
    return constMethod()->stackmap_data();
  }

  void set_stackmap_data(Array<u1>* sd) {
    constMethod()->set_stackmap_data(sd);
  }

  // exception handler table
  bool has_exception_handler() const
                             { return constMethod()->has_exception_handler(); }
  int exception_table_length() const
                             { return constMethod()->exception_table_length(); }
  ExceptionTableElement* exception_table_start() const
                             { return constMethod()->exception_table_start(); }

  // Finds the first entry point bci of an exception handler for an
  // exception of klass ex_klass thrown at throw_bci. A value of NULL
  // for ex_klass indicates that the exception klass is not known; in
  // this case it matches any constraint class. Returns -1 if the
  // exception cannot be handled in this method. The handler
  // constraint classes are loaded if necessary. Note that this may
  // throw an exception if loading of the constraint classes causes
  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
  // If an exception is thrown, returns the bci of the
  // exception handler which caused the exception to be thrown, which
  // is needed for proper retries. See, for example,
  // InterpreterRuntime::exception_handler_for_exception.
  int fast_exception_handler_bci_for(KlassHandle ex_klass, int throw_bci, TRAPS);
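
  // Illustrative sketch (not part of this header): a typical caller resolves
  // a handler for a pending exception and unwinds when -1 comes back. The
  // variable names here are hypothetical; the canonical caller is
  // InterpreterRuntime::exception_handler_for_exception.
  //
  //   KlassHandle ex_klass(THREAD, exception->klass());
  //   int handler_bci = method->fast_exception_handler_bci_for(ex_klass, throw_bci, THREAD);
  //   if (HAS_PENDING_EXCEPTION) {
  //     // handler_bci identifies the handler whose constraint-class loading failed
  //   } else if (handler_bci == -1) {
  //     // no handler in this method: unwind to the caller frame
  //   }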

  // method data access
  MethodData* method_data() const {
    return _method_data;
  }
  void set_method_data(MethodData* data) {
    _method_data = data;
  }

  // invocation counter
  InvocationCounter* invocation_counter()        { return &_invocation_counter; }
  InvocationCounter* backedge_counter()          { return &_backedge_counter; }

#ifdef TIERED
  // We are reusing interpreter_invocation_count as a holder for the previous event count!
  // We can do that since interpreter_invocation_count is not used in tiered.
  int prev_event_count() const                   { return _interpreter_invocation_count; }
  void set_prev_event_count(int count)           { _interpreter_invocation_count = count; }
  jlong prev_time() const                        { return _prev_time; }
  void set_prev_time(jlong time)                 { _prev_time = time; }
  float rate() const                             { return _rate; }
  void set_rate(float rate)                      { _rate = rate; }
#endif
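
  // Illustrative sketch (not part of this header): the tiered policy is
  // assumed to refresh _rate from the event delta over elapsed wall-clock
  // time, using prev_event_count()/prev_time() as the previous sample; the
  // local variable names are hypothetical.
  //
  //   int   delta_e = current_event_count - m->prev_event_count();
  //   jlong delta_t = current_time_ms - m->prev_time();
  //   if (delta_t > 0) m->set_rate((float) delta_e / delta_t);  // events per millisecond
  //   m->set_prev_event_count(current_event_count);
  //   m->set_prev_time(current_time_ms);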

  int invocation_count();
  int backedge_count();

#ifdef GRAAL
  // graal mirror
  oop graal_mirror() const                       { return _graal_mirror; }
  void set_graal_mirror(oop m)                   { oop_store((oop*) &_graal_mirror, m); }

  void set_graal_invocation_time(jlong time)     { _graal_invocation_time = time; }
  jlong graal_invocation_time()                  { return _graal_invocation_time; }

  void set_graal_priority(int prio)              { _graal_priority = prio; }
  int graal_priority()                           { return _graal_priority; }
#endif // GRAAL

  bool was_executed_more_than(int n);
  bool was_never_executed()                      { return !was_executed_more_than(0); }

  static void build_interpreter_method_data(methodHandle method, TRAPS);

  int interpreter_invocation_count() {
    if (TieredCompilation) return invocation_count();
    else return _interpreter_invocation_count;
  }
  void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; }
  int increment_interpreter_invocation_count() {
    if (TieredCompilation) ShouldNotReachHere();
    return ++_interpreter_invocation_count;
  }

#ifndef PRODUCT
  int  compiled_invocation_count() const         { return _compiled_invocation_count; }
  void set_compiled_invocation_count(int count)  { _compiled_invocation_count = count; }
#endif // not PRODUCT

  // Clear (non-shared space) pointers which could not be relevant
  // if this (shared) method were mapped into another JVM.
  void remove_unshareable_info();

  // nmethod/verified compiler entry
  address verified_code_entry();
  bool check_code() const;      // Not inline to avoid circular ref
  nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
  void clear_code();            // Clear out any compiled code
  static void set_code(methodHandle mh, nmethod* code);
  void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
  address get_i2c_entry();
  address get_c2i_entry();
  address get_c2i_unverified_entry();
  AdapterHandlerEntry* adapter() { return _adapter; }
  // setup entry points
  void link_method(methodHandle method, TRAPS);
  // clear entry points. Used by sharing code
  void unlink_method();

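  // Illustrative sketch (not part of this header): because _code can be
  // cleared by a deoptimization at a safepoint, callers should take a single
  // snapshot via code() and use that, rather than re-reading the field.
  //
  //   nmethod* nm = method->code();          // one acquire-load snapshot
  //   if (nm != NULL) {
  //     address entry = nm->entry_point();   // nm stays valid until a safepoint
  //   }
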
  // vtable index
  enum VtableIndexFlag {
    // Valid vtable indexes are non-negative (>= 0).
    // These few negative values are used as sentinels.
    highest_unused_vtable_index_value = -5,
    invalid_vtable_index    = -4,  // distinct from any valid vtable index
    garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
    nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
    // 6330203 Note:  Do not use -1, which was overloaded with many meanings.
  };
  DEBUG_ONLY(bool valid_vtable_index() const     { return _vtable_index >= nonvirtual_vtable_index; })
  int  vtable_index() const                      { assert(valid_vtable_index(), "");
                                                   return _vtable_index; }
  void set_vtable_index(int index)               { _vtable_index = index; }
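
  // Illustrative sketch (not part of this header): a hypothetical caller can
  // combine the sentinels above with can_be_statically_bound() to pick a
  // dispatch strategy.
  //
  //   if (method->can_be_statically_bound() ||
  //       method->vtable_index() == Method::nonvirtual_vtable_index) {
  //     // direct call: no vtable lookup required
  //   } else {
  //     // virtual dispatch through the receiver klass' vtable slot
  //     // at index method->vtable_index()
  //   }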

  // interpreter entry
  address interpreter_entry() const              { return _i2i_entry; }
  // Only used during initialization, so we can set _i2i_entry and _from_interpreted_entry together
  void set_interpreter_entry(address entry)      { _i2i_entry = entry;  _from_interpreted_entry = entry; }
  int interpreter_kind(void) {
    return constMethod()->interpreter_kind();
  }
  void set_interpreter_kind();
  void set_interpreter_kind(int kind) {
    constMethod()->set_interpreter_kind(kind);
  }

  // native function (used for native methods only)
  enum {
    native_bind_event_is_interesting = true
  };
  address native_function() const                { return *(native_function_addr()); }
  address critical_native_function();

  // Must specify a real function (not NULL).
  // Use clear_native_function() to unregister.
  void set_native_function(address function, bool post_event_flag);
  bool has_native_function() const;
  void clear_native_function();

  // signature handler (used for native methods only)
  address signature_handler() const              { return *(signature_handler_addr()); }
  void set_signature_handler(address handler);

  // Interpreter oopmap support
  void mask_for(int bci, InterpreterOopMap* mask);

#ifndef PRODUCT
  // operations on invocation counter
  void print_invocation_count();
#endif

  // byte codes
  void    set_code(address code)      { return constMethod()->set_code(code); }
  address code_base() const           { return constMethod()->code_base(); }
  bool    contains(address bcp) const { return constMethod()->contains(bcp); }

  // prints byte codes
  void print_codes() const            { print_codes_on(tty); }
  void print_codes_on(outputStream* st) const                   PRODUCT_RETURN;
  void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN;

  // checked exceptions
  int checked_exceptions_length() const
                         { return constMethod()->checked_exceptions_length(); }
  CheckedExceptionElement* checked_exceptions_start() const
                         { return constMethod()->checked_exceptions_start(); }

  // localvariable table
  bool has_localvariable_table() const
                         { return constMethod()->has_localvariable_table(); }
  int localvariable_table_length() const
                         { return constMethod()->localvariable_table_length(); }
  LocalVariableTableElement* localvariable_table_start() const
                         { return constMethod()->localvariable_table_start(); }

  bool has_linenumber_table() const
                         { return constMethod()->has_linenumber_table(); }
  u_char* compressed_linenumber_table() const
                         { return constMethod()->compressed_linenumber_table(); }

  // method holder (the Klass* holding this method)
  InstanceKlass* method_holder() const           { return constants()->pool_holder(); }

  void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
  Symbol* klass_name() const;                    // returns the name of the method holder
  BasicType result_type() const;                 // type of the method result
  int result_type_index() const;                 // type index of the method result
  bool is_returning_oop() const                  { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
  bool is_returning_fp() const                   { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }

  // Checked exceptions thrown by this method (resolved to mirrors)
  objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }

  // Access flags
  bool is_public() const                         { return access_flags().is_public();      }
  bool is_private() const                        { return access_flags().is_private();     }
  bool is_protected() const                      { return access_flags().is_protected();   }
  bool is_package_private() const                { return !is_public() && !is_private() && !is_protected(); }
  bool is_static() const                         { return access_flags().is_static();      }
  bool is_final() const                          { return access_flags().is_final();       }
  bool is_synchronized() const                   { return access_flags().is_synchronized();}
  bool is_native() const                         { return access_flags().is_native();      }
  bool is_abstract() const                       { return access_flags().is_abstract();    }
  bool is_strict() const                         { return access_flags().is_strict();      }
  bool is_synthetic() const                      { return access_flags().is_synthetic();   }

  // returns true if contains only return operation
  bool is_empty_method() const;

  // returns true if this is a vanilla constructor
  bool is_vanilla_constructor() const;

  // checks method and its method holder
  bool is_final_method() const;
  bool is_strict_method() const;

  // true if method needs no dynamic dispatch (final and/or no vtable entry)
  bool can_be_statically_bound() const;

  // returns true if the method has any backward branches.
  bool has_loops() {
    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
  };

  bool compute_has_loops_flag();

  bool has_jsrs() {
    return access_flags().has_jsrs();
  };
  void set_has_jsrs() {
    _access_flags.set_has_jsrs();
  }

  // returns true if the method has any monitors.
  bool has_monitors() const                      { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
  bool has_monitor_bytecodes() const             { return access_flags().has_monitor_bytecodes(); }

  void set_has_monitor_bytecodes()               { _access_flags.set_has_monitor_bytecodes(); }

  // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
  // properly nest in the method. It might return false, even though they actually nest properly, since the info
  // has not been computed yet.
  bool guaranteed_monitor_matching() const       { return access_flags().is_monitor_matching(); }
  void set_guaranteed_monitor_matching()         { _access_flags.set_monitor_matching(); }

  // returns true if the method is an accessor function (setter/getter).
  bool is_accessor() const;

  // returns true if the method is an initializer (<init> or <clinit>).
  bool is_initializer() const;

  // returns true if the method is static OR if the classfile version < 51
  bool has_valid_initializer_flags() const;

  // returns true if the method name is <clinit> and the method has
  // valid static initializer flags.
  bool is_static_initializer() const;

  // compiled code support
  // NOTE: code() is inherently racy as deopt can be clearing code
  // simultaneously. Use with caution.
  bool has_compiled_code() const                 { return code() != NULL; }

  // sizing
  static int header_size()                       { return sizeof(Method)/HeapWordSize; }
  static int size(bool is_native);
  int size() const                               { return method_size(); }

  // interpreter support
  static ByteSize const_offset()                 { return byte_offset_of(Method, _constMethod ); }
  static ByteSize access_flags_offset()          { return byte_offset_of(Method, _access_flags ); }
#ifdef CC_INTERP
  static ByteSize result_index_offset()          { return byte_offset_of(Method, _result_index ); }
#endif /* CC_INTERP */
  static ByteSize size_of_locals_offset()        { return byte_offset_of(Method, _max_locals ); }
  static ByteSize size_of_parameters_offset()    { return byte_offset_of(Method, _size_of_parameters); }
  static ByteSize from_compiled_offset()         { return byte_offset_of(Method, _from_compiled_entry); }
  static ByteSize code_offset()                  { return byte_offset_of(Method, _code); }
  static ByteSize invocation_counter_offset()    { return byte_offset_of(Method, _invocation_counter); }
  static ByteSize backedge_counter_offset()      { return byte_offset_of(Method, _backedge_counter); }
  static ByteSize method_data_offset() {
    return byte_offset_of(Method, _method_data);
  }
  static ByteSize interpreter_invocation_counter_offset() { return byte_offset_of(Method, _interpreter_invocation_count); }
#ifdef GRAAL
  static ByteSize graal_invocation_time_offset() { return byte_offset_of(Method, _graal_invocation_time); }
  static ByteSize graal_priority_offset()        { return byte_offset_of(Method, _graal_priority); }
#endif
#ifndef PRODUCT
  static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
#endif // not PRODUCT
  static ByteSize native_function_offset()       { return in_ByteSize(sizeof(Method)); }
  static ByteSize from_interpreted_offset()      { return byte_offset_of(Method, _from_interpreted_entry ); }
  static ByteSize interpreter_entry_offset()     { return byte_offset_of(Method, _i2i_entry ); }
  static ByteSize signature_handler_offset()     { return in_ByteSize(sizeof(Method) + wordSize); }
  static ByteSize max_stack_offset()             { return byte_offset_of(Method, _max_stack ); }

  // for code generation
  static int method_data_offset_in_bytes()       { return offset_of(Method, _method_data); }
  static int interpreter_invocation_counter_offset_in_bytes()
                                                 { return offset_of(Method, _interpreter_invocation_count); }
  static int intrinsic_id_offset_in_bytes()      { return offset_of(Method, _intrinsic_id); }
  static int intrinsic_id_size_in_bytes()        { return sizeof(u1); }

  // Static methods that are used to implement member methods where an exposed this pointer
  // is needed due to possible GCs
  static objArrayHandle resolved_checked_exceptions_impl(Method* this_oop, TRAPS);

  // Returns the byte code index from the byte code pointer
  int     bci_from(address bcp) const;
  address bcp_from(int bci) const;
  int validate_bci_from_bcx(intptr_t bcx) const;

  // Returns the line number for a bci if debugging information for the method is provided,
  // -1 is returned otherwise.
  int line_number_from_bci(int bci) const;

  // Reflection support
  bool is_overridden_in(Klass* k) const;

  // JSR 292 support
  bool is_method_handle_intrinsic() const;       // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
  bool is_compiled_lambda_form() const;          // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
  bool has_member_arg() const;                   // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
  static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
                                                   Symbol* signature,    // anything at all
                                                   TRAPS);
  static Klass* check_non_bcp_klass(Klass* klass);
  // these operate only on invoke methods:
  // presize interpreter frames for extra interpreter stack entries, if needed
  // method handles want to be able to push a few extra values (e.g., a bound receiver), and
  // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist,
  // all without checking for a stack overflow
  static int extra_stack_entries() { return EnableInvokeDynamic ? 2 : 0; }
  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize()

  // RedefineClasses() support:
  bool is_old() const                            { return access_flags().is_old(); }
  void set_is_old()                              { _access_flags.set_is_old(); }
  bool is_obsolete() const                       { return access_flags().is_obsolete(); }
  void set_is_obsolete()                         { _access_flags.set_is_obsolete(); }
  bool on_stack() const                          { return access_flags().on_stack(); }
  void set_on_stack(const bool value);

  // see the definition in method.cpp for the gory details
  bool should_not_be_cached() const;

  // JVMTI Native method prefixing support:
  bool is_prefixed_native() const                { return access_flags().is_prefixed_native(); }
  void set_is_prefixed_native()                  { _access_flags.set_is_prefixed_native(); }

  // Rewriting support
  static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
                                          u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);

  // jmethodID handling
  // Because the useful life-span of a jmethodID cannot be determined,
  // once created they are never reclaimed. The methods to which they refer,
  // however, can be GC'ed away if the class is unloaded or if the method is
  // made obsolete or deleted -- in these cases, the jmethodID
  // refers to NULL (as is the case for any weak reference).
  static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
  static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);

  // Use resolve_jmethod_id() in situations where the caller is expected
  // to provide a valid jmethodID; the only sanity checks are in asserts;
  // result guaranteed not to be NULL.
  inline static Method* resolve_jmethod_id(jmethodID mid) {
    assert(mid != NULL, "JNI method id should not be null");
    return *((Method**)mid);
  }
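
  // Illustrative sketch (not part of this header): code that trusts its
  // caller uses resolve_jmethod_id(), while code that may see stale ids
  // (e.g. after class unloading) uses the checked variant and tests for NULL.
  //
  //   Method* m1 = Method::resolve_jmethod_id(mid);          // mid must be valid
  //   Method* m2 = Method::checked_resolve_jmethod_id(mid);  // may return NULL
  //   if (m2 == NULL) {
  //     // mid refers to an unloaded or obsolete method
  //   }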

  // Use checked_resolve_jmethod_id() in situations where the caller
  // should provide a valid jmethodID, but might not. NULL is returned
  // when the jmethodID does not refer to a valid method.
  static Method* checked_resolve_jmethod_id(jmethodID mid);

  static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
  static bool is_method_id(jmethodID mid);

  // Clear methods
  static void clear_jmethod_ids(ClassLoaderData* loader_data);
  static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;

  // Get this method's jmethodID -- allocate if it doesn't exist
  jmethodID jmethod_id()                         { methodHandle this_h(this);
                                                   return InstanceKlass::get_jmethod_id(method_holder(), this_h); }

  // Lookup the jmethodID for this method.  Return NULL if not found.
  // NOTE that this function can be called from a signal handler
  // (see AsyncGetCallTrace support for Forte Analyzer) and this
  // needs to be async-safe. No allocation should be done and
  // so handles are not used to avoid deadlock.
  jmethodID find_jmethod_id_or_null()            { return method_holder()->jmethod_id_or_null(this); }

  // JNI static invoke cached itable index accessors
  int cached_itable_index()                      { return method_holder()->cached_itable_index(method_idnum()); }
  void set_cached_itable_index(int index)        { method_holder()->set_cached_itable_index(method_idnum(), index); }

  // Support for inlining of intrinsic methods
  vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id; }
  void set_intrinsic_id(vmIntrinsics::ID id)     { _intrinsic_id = (u1) id; }

  // Helper routines for intrinsic_id() and vmIntrinsics::method().
  void init_intrinsic_id();     // updates from _none if a match
  static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);

  bool jfr_towrite()                             { return _jfr_towrite; }
  void set_jfr_towrite(bool towrite)             { _jfr_towrite = towrite; }

  bool force_inline()                            { return _force_inline; }
  void set_force_inline(bool x)                  { _force_inline = x; }
  bool dont_inline()                             { return _dont_inline; }
  void set_dont_inline(bool x)                   { _dont_inline = x; }
  bool is_hidden()                               { return _hidden; }
  void set_hidden(bool x)                        { _hidden = x; }
  ConstMethod::MethodType method_type() const {
    return _constMethod->method_type();
  }
  bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }

  // On-stack replacement support
  bool has_osr_nmethod(int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
  }

  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
  }

  // Inline cache support
  void cleanup_inline_caches();

  // Find if klass for method is loaded
  bool is_klass_loaded_by_klass_index(int klass_index) const;
  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;

  // Indicates whether compilation failed earlier for this method, or
  // whether it is not compilable for another reason like having a
  // breakpoint set in it.
  bool is_not_compilable(int comp_level = CompLevel_any) const;
  void set_not_compilable(int comp_level = CompLevel_all, bool report = true);
  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_compilable(comp_level, false);
  }
  bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true);
  void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_osr_compilable(comp_level, false);
  }

 private:
  void print_made_not_compilable(int comp_level, bool is_osr, bool report);

 public:
  bool is_not_c1_compilable() const              { return access_flags().is_not_c1_compilable(); }
  void set_not_c1_compilable()                   { _access_flags.set_not_c1_compilable(); }
  bool is_not_c2_compilable() const              { return access_flags().is_not_c2_compilable(); }
  void set_not_c2_compilable()                   { _access_flags.set_not_c2_compilable(); }

  bool is_not_c1_osr_compilable() const          { return is_not_c1_compilable(); }  // don't waste an accessFlags bit
  void set_not_c1_osr_compilable()               { set_not_c1_compilable(); }        // don't waste an accessFlags bit
  bool is_not_c2_osr_compilable() const          { return access_flags().is_not_c2_osr_compilable(); }
  void set_not_c2_osr_compilable()               { _access_flags.set_not_c2_osr_compilable(); }

  // Background compilation support
  bool queued_for_compilation() const            { return access_flags().queued_for_compilation(); }
  void set_queued_for_compilation()              { _access_flags.set_queued_for_compilation(); }
  void clear_queued_for_compilation()            { _access_flags.clear_queued_for_compilation(); }

  // Resolve all classes in signature, return 'true' if successful
  static bool load_signature_classes(methodHandle m, TRAPS);

  // Return true if not all classes referenced in the signature, including the return type, have been loaded
  static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);

  // Printing
  void print_short_name(outputStream* st = tty) /*PRODUCT_RETURN*/; // prints as klassname::methodname; Exposed so field engineers can debug VM
  void print_name(outputStream* st = tty)       PRODUCT_RETURN;     // prints as "virtual void foo(int)"

  // Helper routine used for method sorting
  static void sort_methods(Array<Method*>* methods,
                           Array<AnnotationArray*>* methods_annotations,
                           Array<AnnotationArray*>* methods_parameter_annotations,
                           Array<AnnotationArray*>* methods_default_annotations,
                           bool idempotent = false);

  // size of parameters
  void set_size_of_parameters(int size)          { _size_of_parameters = size; }

  // Deallocation function for redefine classes or if an error occurs
  void deallocate_contents(ClassLoaderData* loader_data);

  // Printing
#ifndef PRODUCT
  void print_on(outputStream* st) const;
#endif
  void print_value_on(outputStream* st) const;

  const char* internal_name() const { return "{method}"; }

  // Verify
  void verify() { verify_on(tty); }
  void verify_on(outputStream* st);

 private:

  // Inlined elements
  address* native_function_addr() const          { assert(is_native(), "must be native"); return (address*) (this+1); }
  address* signature_handler_addr() const        { return native_function_addr() + 1; }
#ifdef GRAAL
  oop* adr_graal_mirror() const                  { return (oop*)&_graal_mirror; }
#endif
};


// Utility class for compressing line number tables

class CompressedLineNumberWriteStream: public CompressedWriteStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
  CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}

  // Write (bci, line number) pair to stream
  void write_pair_regular(int bci_delta, int line_delta);

  inline void write_pair_inline(int bci, int line) {
    int bci_delta = bci - _bci;
    int line_delta = line - _line;
    _bci = bci;
    _line = line;
    // Skip (0,0) deltas - they do not add information and conflict with terminator.
    if (bci_delta == 0 && line_delta == 0) return;
    // Check if bci is 5-bit and line number 3-bit unsigned.
    if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
      // Compress into single byte.
      jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
      // Check that value doesn't match escape character.
      if (value != 0xFF) {
        write_byte(value);
        return;
      }
    }
    write_pair_regular(bci_delta, line_delta);
  }

  // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
  // Disabling optimization doesn't work for methods in header files
  // so we force it to call through the non-optimized version in the .cpp.
  // It's gross, but it's the only way we can ensure that all callers are
  // fixed.  _MSC_VER is defined by the Windows compiler.
#if defined(_M_AMD64) && _MSC_VER >= 1400
  void write_pair(int bci, int line);
#else
  void write_pair(int bci, int line) { write_pair_inline(bci, line); }
#endif

  // Write end-of-stream marker
  void write_terminator() { write_byte(0); }
};
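
// Illustrative sketch (not part of this header): encoding the pairs
// (bci=0,line=10), (bci=5,line=11) and (bci=40,line=20) with the 5-bit/3-bit
// packing implemented in write_pair_inline() above.
//
//   CompressedLineNumberWriteStream stream(64);
//   stream.write_pair(0, 10);    // deltas (0,10): line delta exceeds 3 bits -> regular encoding
//   stream.write_pair(5, 11);    // deltas (5,1):  fits -> single byte (5 << 3) | 1 == 0x29
//   stream.write_pair(40, 20);   // deltas (35,9): bci delta exceeds 5 bits -> regular encoding
//   stream.write_terminator();   // a lone 0 byte ends the table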


// Utility class for decompressing line number tables

class CompressedLineNumberReadStream: public CompressedReadStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberReadStream(u_char* buffer);
  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
  bool read_pair();
  // Accessing bci and line number (after calling read_pair)
  int bci() const  { return _bci; }
  int line() const { return _line; }
};
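
// Illustrative sketch (not part of this header): decompressing a table is a
// simple loop over read_pair(); line_number_from_bci() is assumed to work
// along these lines.
//
//   CompressedLineNumberReadStream stream(method->compressed_linenumber_table());
//   while (stream.read_pair()) {
//     tty->print_cr("bci %d -> line %d", stream.bci(), stream.line());
//   }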


/// Fast Breakpoints.

// If this structure gets more complicated (because bpts get numerous),
// move it into its own header.

// There is presently no provision for concurrent access
// to breakpoint lists, which is only OK for JVMTI because
// breakpoints are written only at safepoints, and are read
// concurrently only outside of safepoints.

class BreakpointInfo : public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  Bytecodes::Code  _orig_bytecode;
  int              _bci;
  u2               _name_index;       // of method
  u2               _signature_index;  // of method
  BreakpointInfo*  _next;             // simple storage allocation

 public:
  BreakpointInfo(Method* m, int bci);

  // accessors
  Bytecodes::Code orig_bytecode()                     { return _orig_bytecode; }
  void set_orig_bytecode(Bytecodes::Code code)        { _orig_bytecode = code; }
  int bci()                                           { return _bci; }

  BreakpointInfo* next() const                        { return _next; }
  void set_next(BreakpointInfo* n)                    { _next = n; }

  // helpers for searchers
  bool match(const Method* m, int bci) {
    return bci == _bci && match(m);
  }

  bool match(const Method* m) {
    return _name_index == m->name_index() &&
      _signature_index == m->signature_index();
  }

  void set(Method* method);
  void clear(Method* method);
};
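
// Illustrative sketch (not part of this header): a breakpoint list hanging
// off the method holder would be searched with the match() helpers above;
// the breakpoints() accessor used here is assumed, not defined in this file.
//
//   for (BreakpointInfo* bp = holder->breakpoints(); bp != NULL; bp = bp->next()) {
//     if (bp->match(method, bci)) {
//       return bp->orig_bytecode();   // the bytecode hidden by the breakpoint
//     }
//   }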

// Utility class for accessing exception handlers
class ExceptionTable : public StackObj {
 private:
  ExceptionTableElement* _table;
  u2  _length;

 public:
  ExceptionTable(Method* m) {
    if (m->has_exception_handler()) {
      _table = m->exception_table_start();
      _length = m->exception_table_length();
    } else {
      _table = NULL;
      _length = 0;
    }
  }

  int length() const {
    return _length;
  }

  u2 start_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].start_pc;
  }

  void set_start_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].start_pc = value;
  }

  u2 end_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].end_pc;
  }

  void set_end_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].end_pc = value;
  }

  u2 handler_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].handler_pc;
  }

  void set_handler_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].handler_pc = value;
  }

  u2 catch_type_index(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].catch_type_index;
  }

  void set_catch_type_index(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].catch_type_index = value;
  }
};
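
// Illustrative sketch (not part of this header): scanning a method's
// handlers for a given bci; a catch_type_index of 0 denotes a catch-all
// entry. The variable names are hypothetical.
//
//   ExceptionTable table(method);
//   for (int i = 0; i < table.length(); i++) {
//     if (table.start_pc(i) <= bci && bci < table.end_pc(i)) {
//       int handler_bci = table.handler_pc(i);
//       int type_index  = table.catch_type_index(i);   // 0 means "any exception"
//       // dispatch against type_index, then jump to handler_bci
//     }
//   }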

#endif // SHARE_VM_OOPS_METHODOOP_HPP