comparison src/share/vm/oops/method.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents src/share/vm/oops/methodOop.cpp@977007096840
children aed758eda82a
6724:36d1d483d5d6 6725:da91efe96a93
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "code/debugInfoRec.hpp"
28 #include "gc_interface/collectedHeap.inline.hpp"
29 #include "interpreter/bytecodeStream.hpp"
30 #include "interpreter/bytecodeTracer.hpp"
31 #include "interpreter/bytecodes.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/oopMapCache.hpp"
34 #include "memory/gcLocker.hpp"
35 #include "memory/generation.hpp"
36 #include "memory/metadataFactory.hpp"
37 #include "memory/oopFactory.hpp"
38 #include "oops/methodData.hpp"
39 #include "oops/method.hpp"
40 #include "oops/oop.inline.hpp"
41 #include "oops/symbol.hpp"
42 #include "prims/jvmtiExport.hpp"
43 #include "prims/jvmtiRedefineClasses.hpp"
44 #include "prims/methodHandles.hpp"
45 #include "prims/nativeLookup.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/compilationPolicy.hpp"
48 #include "runtime/frame.inline.hpp"
49 #include "runtime/handles.inline.hpp"
50 #include "runtime/relocator.hpp"
51 #include "runtime/sharedRuntime.hpp"
52 #include "runtime/signature.hpp"
53 #include "utilities/quickSort.hpp"
54 #include "utilities/xmlstream.hpp"
55
56
57 // Implementation of Method
58
59 Method* Method::allocate(ClassLoaderData* loader_data,
60 int byte_code_size,
61 AccessFlags access_flags,
62 int compressed_line_number_size,
63 int localvariable_table_length,
64 int exception_table_length,
65 int checked_exceptions_length,
66 TRAPS) {
67 assert(!access_flags.is_native() || byte_code_size == 0,
68 "native methods should not contain byte codes");
69 ConstMethod* cm = ConstMethod::allocate(loader_data,
70 byte_code_size,
71 compressed_line_number_size,
72 localvariable_table_length,
73 exception_table_length,
74 checked_exceptions_length,
75 CHECK_NULL);
76
77 int size = Method::size(access_flags.is_native());
78
79 return new (loader_data, size, false, THREAD) Method(cm, access_flags, size);
80 }
81
82 Method::Method(ConstMethod* xconst,
83 AccessFlags access_flags, int size) {
84 No_Safepoint_Verifier no_safepoint;
85 set_constMethod(xconst);
86 set_access_flags(access_flags);
87 set_method_size(size);
88 set_name_index(0);
89 set_signature_index(0);
90 #ifdef CC_INTERP
91 set_result_index(T_VOID);
92 #endif
93 set_constants(NULL);
94 set_max_stack(0);
95 set_max_locals(0);
96 set_intrinsic_id(vmIntrinsics::_none);
97 set_jfr_towrite(false);
98 set_method_data(NULL);
99 set_interpreter_throwout_count(0);
100 set_vtable_index(Method::garbage_vtable_index);
101
102 // Fix and bury in Method*
103 set_interpreter_entry(NULL); // sets i2i entry and from_int
104 set_adapter_entry(NULL);
105 clear_code(); // from_c/from_i get set to c2i/i2i
106
107 if (access_flags.is_native()) {
108 clear_native_function();
109 set_signature_handler(NULL);
110 }
111
112 NOT_PRODUCT(set_compiled_invocation_count(0);)
113 set_interpreter_invocation_count(0);
114 invocation_counter()->init();
115 backedge_counter()->init();
116 clear_number_of_breakpoints();
117
118 #ifdef TIERED
119 set_rate(0);
120 set_prev_event_count(0);
121 set_prev_time(0);
122 #endif
123 }
124
125 // Release Method*. The nmethod will be gone when we get here because
126 // we've walked the code cache.
127 void Method::deallocate_contents(ClassLoaderData* loader_data) {
128 MetadataFactory::free_metadata(loader_data, constMethod());
129 set_constMethod(NULL);
130 MetadataFactory::free_metadata(loader_data, method_data());
131 set_method_data(NULL);
132 // The nmethod will be gone when we get here.
133 if (code() != NULL) _code = NULL;
134 }
135
136 address Method::get_i2c_entry() {
137 assert(_adapter != NULL, "must have");
138 return _adapter->get_i2c_entry();
139 }
140
141 address Method::get_c2i_entry() {
142 assert(_adapter != NULL, "must have");
143 return _adapter->get_c2i_entry();
144 }
145
146 address Method::get_c2i_unverified_entry() {
147 assert(_adapter != NULL, "must have");
148 return _adapter->get_c2i_unverified_entry();
149 }
150
151 char* Method::name_and_sig_as_C_string() const {
152 return name_and_sig_as_C_string(Klass::cast(constants()->pool_holder()), name(), signature());
153 }
154
155 char* Method::name_and_sig_as_C_string(char* buf, int size) const {
156 return name_and_sig_as_C_string(Klass::cast(constants()->pool_holder()), name(), signature(), buf, size);
157 }
158
159 char* Method::name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature) {
160 const char* klass_name = klass->external_name();
161 int klass_name_len = (int)strlen(klass_name);
162 int method_name_len = method_name->utf8_length();
163 int len = klass_name_len + 1 + method_name_len + signature->utf8_length();
164 char* dest = NEW_RESOURCE_ARRAY(char, len + 1);
165 strcpy(dest, klass_name);
166 dest[klass_name_len] = '.';
167 strcpy(&dest[klass_name_len + 1], method_name->as_C_string());
168 strcpy(&dest[klass_name_len + 1 + method_name_len], signature->as_C_string());
169 dest[len] = 0;
170 return dest;
171 }
172
173 char* Method::name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size) {
174 Symbol* klass_name = klass->name();
175 klass_name->as_klass_external_name(buf, size);
176 int len = (int)strlen(buf);
177
178 if (len < size - 1) {
179 buf[len++] = '.';
180
181 method_name->as_C_string(&(buf[len]), size - len);
182 len = (int)strlen(buf);
183
184 signature->as_C_string(&(buf[len]), size - len);
185 }
186
187 return buf;
188 }
189
190 int Method::fast_exception_handler_bci_for(KlassHandle ex_klass, int throw_bci, TRAPS) {
191 // exception table holds quadruple entries of the form (beg_bci, end_bci, handler_bci, klass_index)
192 // access exception table
193 ExceptionTable table(this);
194 int length = table.length();
195 // iterate through all entries sequentially
196 constantPoolHandle pool(THREAD, constants());
197 for (int i = 0; i < length; i ++) {
198 // Reacquire the table in case a GC happened
199 ExceptionTable table(this);
200 int beg_bci = table.start_pc(i);
201 int end_bci = table.end_pc(i);
202 assert(beg_bci <= end_bci, "inconsistent exception table");
203 if (beg_bci <= throw_bci && throw_bci < end_bci) {
204 // exception handler bci range covers throw_bci => investigate further
205 int handler_bci = table.handler_pc(i);
206 int klass_index = table.catch_type_index(i);
207 if (klass_index == 0) {
208 return handler_bci;
209 } else if (ex_klass.is_null()) {
210 return handler_bci;
211 } else {
212 // we know the exception class => get the constraint class
213 // this may require loading of the constraint class; if verification
214 // fails or some other exception occurs, return handler_bci
215 Klass* k = pool->klass_at(klass_index, CHECK_(handler_bci));
216 KlassHandle klass = KlassHandle(THREAD, k);
217 assert(klass.not_null(), "klass not loaded");
218 if (ex_klass->is_subtype_of(klass())) {
219 return handler_bci;
220 }
221 }
222 }
223 }
224
225 return -1;
226 }
227
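// A standalone sketch (not VM code; ExEntry and find_handler_bci are
// hypothetical names) of the quadruple-entry search above. The subtype test
// is passed in as a callback because resolving the catch type needs a live
// constant pool.
#include <vector>

struct ExEntry { int start_pc, end_pc, handler_pc, catch_type_index; };

static int find_handler_bci(const std::vector<ExEntry>& table, int throw_bci,
                            bool (*is_subtype_of_catch_type)(int cp_index)) {
  for (const ExEntry& e : table) {
    if (e.start_pc <= throw_bci && throw_bci < e.end_pc) {  // range covers throw_bci
      if (e.catch_type_index == 0 ||                        // 0 == catch-all entry
          is_subtype_of_catch_type(e.catch_type_index)) {
        return e.handler_pc;
      }
      // Otherwise keep scanning: a later entry may still cover throw_bci.
    }
  }
  return -1;  // no handler in this method; the caller unwinds further
}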
228 void Method::mask_for(int bci, InterpreterOopMap* mask) {
229
230 Thread* myThread = Thread::current();
231 methodHandle h_this(myThread, this);
232 #ifdef ASSERT
233 bool has_capability = myThread->is_VM_thread() ||
234 myThread->is_ConcurrentGC_thread() ||
235 myThread->is_GC_task_thread();
236
237 if (!has_capability) {
238 if (!VerifyStack && !VerifyLastFrame) {
239 // verify stack calls this outside VM thread
240 warning("oopmap should only be accessed by the "
241 "VM, GC task or CMS threads (or during debugging)");
242 InterpreterOopMap local_mask;
243 InstanceKlass::cast(method_holder())->mask_for(h_this, bci, &local_mask);
244 local_mask.print();
245 }
246 }
247 #endif
248 InstanceKlass::cast(method_holder())->mask_for(h_this, bci, mask);
249 return;
250 }
251
252
253 int Method::bci_from(address bcp) const {
254 assert(is_native() && bcp == code_base() || contains(bcp) || is_error_reported(),
255 err_msg("bcp doesn't belong to this method: bcp: " INTPTR_FORMAT ", method: %s", bcp, name_and_sig_as_C_string()));
256 return bcp - code_base();
257 }
258
259
260 // Return (int)bcx if it appears to be a valid BCI.
261 // Return bci_from((address)bcx) if it appears to be a valid BCP.
262 // Return -1 otherwise.
263 // Used by profiling code, when invalid data is a possibility.
264 // The caller is responsible for validating the Method* itself.
265 int Method::validate_bci_from_bcx(intptr_t bcx) const {
266 // keep bci as -1 if not a valid bci
267 int bci = -1;
268 if (bcx == 0 || (address)bcx == code_base()) {
269 // code_size() may return 0 and we allow 0 here
270 // the method may be native
271 bci = 0;
272 } else if (frame::is_bci(bcx)) {
273 if (bcx < code_size()) {
274 bci = (int)bcx;
275 }
276 } else if (contains((address)bcx)) {
277 bci = (address)bcx - code_base();
278 }
279 // Assert that if we have dodged any asserts, bci is negative.
280 assert(bci == -1 || bci == bci_from(bcp_from(bci)), "sane bci if >=0");
281 return bci;
282 }
283
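// A standalone sketch (hypothetical helper, not VM code) of why one intptr_t
// can carry either form. A bci is a small offset into the bytecodes; a bcp is
// a full pointer into [code_base, code_base + code_size), so the two ranges
// can be told apart, which is what validate_bci_from_bcx does above.
static int bcx_to_bci(intptr_t bcx, const unsigned char* code_base, int code_size) {
  if (bcx >= 0 && bcx < code_size) {
    return (int)bcx;                                   // already a plausible bci
  }
  const unsigned char* p = (const unsigned char*)bcx;
  if (p >= code_base && p < code_base + code_size) {
    return (int)(p - code_base);                       // a bcp: convert to offset
  }
  return -1;                                           // neither: treat as invalid
}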
284 address Method::bcp_from(int bci) const {
285 assert((is_native() && bci == 0) || (!is_native() && 0 <= bci && bci < code_size()), "illegal bci");
286 address bcp = code_base() + bci;
287 assert(is_native() && bcp == code_base() || contains(bcp), "bcp doesn't belong to this method");
288 return bcp;
289 }
290
291
292 int Method::size(bool is_native) {
293 // If native, then include pointers for native_function and signature_handler
294 int extra_bytes = (is_native) ? 2*sizeof(address*) : 0;
295 int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord;
296 return align_object_size(header_size() + extra_words);
297 }
298
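// A sketch of the arithmetic above with the VM's helpers replaced by plain
// code (method_words_needed is a hypothetical name; BytesPerWord is assumed
// to equal sizeof(void*)):
static int method_words_needed(int header_words, bool is_native) {
  const int bytes_per_word = (int)sizeof(void*);
  // Native methods carry two extra slots: native_function and signature_handler.
  int extra_bytes = is_native ? 2 * (int)sizeof(void*) : 0;
  int extra_words = (extra_bytes + bytes_per_word - 1) / bytes_per_word;  // round up
  return header_words + extra_words;  // the VM then rounds up to object alignment
}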
299
300 Symbol* Method::klass_name() const {
301 Klass* k = method_holder();
302 assert(k->is_klass(), "must be klass");
303 InstanceKlass* ik = (InstanceKlass*) k;
304 return ik->name();
305 }
306
307
308 void Method::set_interpreter_kind() {
309 int kind = Interpreter::method_kind(this);
310 assert(kind != Interpreter::invalid,
311 "interpreter entry must be valid");
312 set_interpreter_kind(kind);
313 }
314
315
316 // Attempt to return the Method to its original state. Clear any pointers
317 // (to objects outside the shared spaces). We won't be able to predict
318 // where they should point in a new JVM. Further initialize some
319 // entries now in order to allow them to be write protected later.
320
321 void Method::remove_unshareable_info() {
322 unlink_method();
323 set_interpreter_kind();
324 }
325
326
327 bool Method::was_executed_more_than(int n) {
328 // Invocation counter is reset when the Method* is compiled.
329 // If the method has compiled code we therefore assume it has
330 // been executed more than n times.
331 if (is_accessor() || is_empty_method() || (code() != NULL)) {
332 // interpreter doesn't bump invocation counter of trivial methods
333 // compiler does not bump invocation counter of compiled methods
334 return true;
335 }
336 else if (_invocation_counter.carry() || (method_data() != NULL && method_data()->invocation_counter()->carry())) {
337 // The carry bit is set when the counter overflows and causes
338 // a compilation to occur. We don't know how many times
339 // the counter has been reset, so we simply assume it has
340 // been executed more than n times.
341 return true;
342 } else {
343 return invocation_count() > n;
344 }
345 }
346
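// A sketch (hypothetical names, not VM code) of why the carry bit forces a
// "yes" answer: once a counter overflows, it is reset for the next
// compilation attempt and the absolute count is lost, so the carry acts as a
// sticky saturation flag.
struct CounterSketch { unsigned count; bool carry; };
static bool executed_more_than_sketch(const CounterSketch& c, unsigned n) {
  return c.carry || c.count > n;  // carry set => reset at least once => "more than n"
}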
347 #ifndef PRODUCT
348 void Method::print_invocation_count() {
349 if (is_static()) tty->print("static ");
350 if (is_final()) tty->print("final ");
351 if (is_synchronized()) tty->print("synchronized ");
352 if (is_native()) tty->print("native ");
353 method_holder()->name()->print_symbol_on(tty);
354 tty->print(".");
355 name()->print_symbol_on(tty);
356 signature()->print_symbol_on(tty);
357
358 if (WizardMode) {
359 // dump the size of the byte codes
360 tty->print(" {%d}", code_size());
361 }
362 tty->cr();
363
364 tty->print_cr (" interpreter_invocation_count: %8d ", interpreter_invocation_count());
365 tty->print_cr (" invocation_counter: %8d ", invocation_count());
366 tty->print_cr (" backedge_counter: %8d ", backedge_count());
367 if (CountCompiledCalls) {
368 tty->print_cr (" compiled_invocation_count: %8d ", compiled_invocation_count());
369 }
370
371 }
372 #endif
373
374 // Build a MethodData* object to hold information about this method
375 // collected in the interpreter.
376 void Method::build_interpreter_method_data(methodHandle method, TRAPS) {
377 // Do not profile method if current thread holds the pending list lock,
378 // which avoids a deadlock when acquiring the MethodData_lock.
379 if (instanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) {
380 return;
381 }
382
383 // Grab a lock here to prevent multiple
384 // MethodData*s from being created.
385 MutexLocker ml(MethodData_lock, THREAD);
386 if (method->method_data() == NULL) {
387 ClassLoaderData* loader_data = method->method_holder()->class_loader_data();
388 MethodData* method_data = MethodData::allocate(loader_data, method, CHECK);
389 method->set_method_data(method_data);
390 if (PrintMethodData && (Verbose || WizardMode)) {
391 ResourceMark rm(THREAD);
392 tty->print("build_interpreter_method_data for ");
393 method->print_name(tty);
394 tty->cr();
395 // At the end of the run, the MDO, full of data, will be dumped.
396 }
397 }
398 }
399
400 void Method::cleanup_inline_caches() {
401 // The current system doesn't use inline caches in the interpreter
402 // => nothing to do (keep this method around for future use)
403 }
404
405
406 int Method::extra_stack_words() {
407 // not an inline function, to avoid a header dependency on Interpreter
408 return extra_stack_entries() * Interpreter::stackElementSize;
409 }
410
411
412 void Method::compute_size_of_parameters(Thread *thread) {
413 ArgumentSizeComputer asc(signature());
414 set_size_of_parameters(asc.size() + (is_static() ? 0 : 1));
415 }
416
417 #ifdef CC_INTERP
418 void Method::set_result_index(BasicType type) {
419 _result_index = Interpreter::BasicType_as_index(type);
420 }
421 #endif
422
423 BasicType Method::result_type() const {
424 ResultTypeFinder rtf(signature());
425 return rtf.type();
426 }
427
428
429 bool Method::is_empty_method() const {
430 return code_size() == 1
431 && *code_base() == Bytecodes::_return;
432 }
433
434
435 bool Method::is_vanilla_constructor() const {
436 // Returns true if this method is a vanilla constructor, i.e. an "<init>" "()V" method
437 // which only calls the superclass vanilla constructor and possibly does stores of
438 // zero constants to local fields:
439 //
440 // aload_0
441 // invokespecial
442 // indexbyte1
443 // indexbyte2
444 //
445 // followed by an (optional) sequence of:
446 //
447 // aload_0
448 // aconst_null / iconst_0 / fconst_0 / dconst_0
449 // putfield
450 // indexbyte1
451 // indexbyte2
452 //
453 // followed by:
454 //
455 // return
456
457 assert(name() == vmSymbols::object_initializer_name(), "Should only be called for default constructors");
458 assert(signature() == vmSymbols::void_method_signature(), "Should only be called for default constructors");
459 int size = code_size();
460 // Check if the size matches
461 if (size == 0 || size % 5 != 0) return false;
462 address cb = code_base();
463 int last = size - 1;
464 if (cb[0] != Bytecodes::_aload_0 || cb[1] != Bytecodes::_invokespecial || cb[last] != Bytecodes::_return) {
465 // Does not call superclass default constructor
466 return false;
467 }
468 // Check optional sequence
469 for (int i = 4; i < last; i += 5) {
470 if (cb[i] != Bytecodes::_aload_0) return false;
471 if (!Bytecodes::is_zero_const(Bytecodes::cast(cb[i+1]))) return false;
472 if (cb[i+2] != Bytecodes::_putfield) return false;
473 }
474 return true;
475 }
476
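// A standalone sketch (not VM code; the helper name is hypothetical) of the
// accepted byte pattern, spelled out on a raw byte array. Opcode values are
// from the JVM specification.
static bool looks_like_vanilla_init(const unsigned char* cb, int size) {
  const unsigned char ALOAD_0 = 0x2a, INVOKESPECIAL = 0xb7,
                      PUTFIELD = 0xb5, RETURN = 0xb1;
  if (size == 0 || size % 5 != 0) return false;         // 4 + 5k + 1 bytes total
  if (cb[0] != ALOAD_0 || cb[1] != INVOKESPECIAL || cb[size - 1] != RETURN) {
    return false;                                       // must call super <init> and return
  }
  for (int i = 4; i < size - 1; i += 5) {               // optional zero-store groups
    bool zero_const = cb[i + 1] == 0x01 ||              // aconst_null
                      cb[i + 1] == 0x03 ||              // iconst_0
                      cb[i + 1] == 0x0b ||              // fconst_0
                      cb[i + 1] == 0x0e;                // dconst_0
    if (cb[i] != ALOAD_0 || !zero_const || cb[i + 2] != PUTFIELD) return false;
  }
  return true;
}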
477
478 bool Method::compute_has_loops_flag() {
479 BytecodeStream bcs(this);
480 Bytecodes::Code bc;
481
482 while ((bc = bcs.next()) >= 0) {
483 switch( bc ) {
484 case Bytecodes::_ifeq:
485 case Bytecodes::_ifnull:
486 case Bytecodes::_iflt:
487 case Bytecodes::_ifle:
488 case Bytecodes::_ifne:
489 case Bytecodes::_ifnonnull:
490 case Bytecodes::_ifgt:
491 case Bytecodes::_ifge:
492 case Bytecodes::_if_icmpeq:
493 case Bytecodes::_if_icmpne:
494 case Bytecodes::_if_icmplt:
495 case Bytecodes::_if_icmpgt:
496 case Bytecodes::_if_icmple:
497 case Bytecodes::_if_icmpge:
498 case Bytecodes::_if_acmpeq:
499 case Bytecodes::_if_acmpne:
500 case Bytecodes::_goto:
501 case Bytecodes::_jsr:
502 if( bcs.dest() < bcs.next_bci() ) _access_flags.set_has_loops();
503 break;
504
505 case Bytecodes::_goto_w:
506 case Bytecodes::_jsr_w:
507 if( bcs.dest_w() < bcs.next_bci() ) _access_flags.set_has_loops();
508 break;
509 }
510 }
511 _access_flags.set_loops_flag_init();
512 return _access_flags.has_loops();
513 }
514
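// A sketch (hypothetical names): "has loops" reduces to "has a backward
// branch", i.e. some branch whose destination precedes the bytecode that
// follows it, which is exactly the dest < next_bci test above.
#include <vector>
struct BranchSketch { int dest_bci; int next_bci; };
static bool has_backward_branch(const std::vector<BranchSketch>& branches) {
  for (const BranchSketch& b : branches) {
    if (b.dest_bci < b.next_bci) return true;  // jumps to earlier code => loop
  }
  return false;
}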
515
516 bool Method::is_final_method() const {
517 // %%% Should return true for private methods also,
518 // since there is no way to override them.
519 return is_final() || Klass::cast(method_holder())->is_final();
520 }
521
522
523 bool Method::is_strict_method() const {
524 return is_strict();
525 }
526
527
528 bool Method::can_be_statically_bound() const {
529 if (is_final_method()) return true;
530 return vtable_index() == nonvirtual_vtable_index;
531 }
532
533
534 bool Method::is_accessor() const {
535 if (code_size() != 5) return false;
536 if (size_of_parameters() != 1) return false;
537 if (java_code_at(0) != Bytecodes::_aload_0 ) return false;
538 if (java_code_at(1) != Bytecodes::_getfield) return false;
539 if (java_code_at(4) != Bytecodes::_areturn &&
540 java_code_at(4) != Bytecodes::_ireturn ) return false;
541 return true;
542 }
543
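// A standalone sketch (not VM code) of the exact 5-byte getter shape matched
// above, written against raw bytes. Opcode values are from the JVM spec.
static bool looks_like_getter(const unsigned char* cb, int size) {
  const unsigned char ALOAD_0 = 0x2a, GETFIELD = 0xb4,
                      IRETURN = 0xac, ARETURN = 0xb0;
  return size == 5 &&
         cb[0] == ALOAD_0 &&                   // push 'this'
         cb[1] == GETFIELD &&                  // cb[2..3] hold the field's cp index
         (cb[4] == IRETURN || cb[4] == ARETURN);
}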
544
545 bool Method::is_initializer() const {
546 return name() == vmSymbols::object_initializer_name() || is_static_initializer();
547 }
548
549 bool Method::has_valid_initializer_flags() const {
550 return (is_static() ||
551 InstanceKlass::cast(method_holder())->major_version() < 51);
552 }
553
554 bool Method::is_static_initializer() const {
555 // For classfiles version 51 or greater, ensure that the clinit method is
556 // static. Non-static methods with the name "<clinit>" are not static
557 // initializers. (older classfiles exempted for backward compatibility)
558 return name() == vmSymbols::class_initializer_name() &&
559 has_valid_initializer_flags();
560 }
561
562
563 objArrayHandle Method::resolved_checked_exceptions_impl(Method* this_oop, TRAPS) {
564 int length = this_oop->checked_exceptions_length();
565 if (length == 0) { // common case
566 return objArrayHandle(THREAD, Universe::the_empty_class_klass_array());
567 } else {
568 methodHandle h_this(THREAD, this_oop);
569 objArrayOop m_oop = oopFactory::new_objArray(SystemDictionary::Class_klass(), length, CHECK_(objArrayHandle()));
570 objArrayHandle mirrors (THREAD, m_oop);
571 for (int i = 0; i < length; i++) {
572 CheckedExceptionElement* table = h_this->checked_exceptions_start(); // recompute on each iteration, not gc safe
573 Klass* k = h_this->constants()->klass_at(table[i].class_cp_index, CHECK_(objArrayHandle()));
574 assert(Klass::cast(k)->is_subclass_of(SystemDictionary::Throwable_klass()), "invalid exception class");
575 mirrors->obj_at_put(i, Klass::cast(k)->java_mirror());
576 }
577 return mirrors;
578 }
579 };
580
581
582 int Method::line_number_from_bci(int bci) const {
583 if (bci == SynchronizationEntryBCI) bci = 0;
584 assert(bci == 0 || 0 <= bci && bci < code_size(), "illegal bci");
585 int best_bci = 0;
586 int best_line = -1;
587
588 if (has_linenumber_table()) {
589 // The line numbers are a short array of 2-tuples [start_pc, line_number].
590 // Not necessarily sorted and not necessarily one-to-one.
591 CompressedLineNumberReadStream stream(compressed_linenumber_table());
592 while (stream.read_pair()) {
593 if (stream.bci() == bci) {
594 // perfect match
595 return stream.line();
596 } else {
597 // update best_bci/line
598 if (stream.bci() < bci && stream.bci() >= best_bci) {
599 best_bci = stream.bci();
600 best_line = stream.line();
601 }
602 }
603 }
604 }
605 return best_line;
606 }
607
608
609 bool Method::is_klass_loaded_by_klass_index(int klass_index) const {
610 if( constants()->tag_at(klass_index).is_unresolved_klass() ) {
611 Thread *thread = Thread::current();
612 Symbol* klass_name = constants()->klass_name_at(klass_index);
613 Handle loader(thread, InstanceKlass::cast(method_holder())->class_loader());
614 Handle prot (thread, Klass::cast(method_holder())->protection_domain());
615 return SystemDictionary::find(klass_name, loader, prot, thread) != NULL;
616 } else {
617 return true;
618 }
619 }
620
621
622 bool Method::is_klass_loaded(int refinfo_index, bool must_be_resolved) const {
623 int klass_index = constants()->klass_ref_index_at(refinfo_index);
624 if (must_be_resolved) {
625 // Make sure klass is resolved in constantpool.
626 if (constants()->tag_at(klass_index).is_unresolved_klass()) return false;
627 }
628 return is_klass_loaded_by_klass_index(klass_index);
629 }
630
631
632 void Method::set_native_function(address function, bool post_event_flag) {
633 assert(function != NULL, "use clear_native_function to unregister natives");
634 assert(!is_method_handle_intrinsic() || function == SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), "");
635 address* native_function = native_function_addr();
636
637 // We can see racers trying to install the same native function. Installing
638 // it once is plenty.
639 address current = *native_function;
640 if (current == function) return;
641 if (post_event_flag && JvmtiExport::should_post_native_method_bind() &&
642 function != NULL) {
643 // native_method_throw_unsatisfied_link_error_entry() should only
644 // be passed when post_event_flag is false.
645 assert(function !=
646 SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
647 "post_event_flag mis-match");
648
649 // Post the bind event, and possibly change the bind function.
650 JvmtiExport::post_native_method_bind(this, &function);
651 }
652 *native_function = function;
653 // This function can be called more than once. We must make sure that we always
654 // use the latest registered method -> check if a stub already has been generated.
655 // If so, we have to make it not_entrant.
656 nmethod* nm = code(); // Put it into local variable to guard against concurrent updates
657 if (nm != NULL) {
658 nm->make_not_entrant();
659 }
660 }
661
662
663 bool Method::has_native_function() const {
664 if (is_method_handle_intrinsic())
665 return false; // special-cased in SharedRuntime::generate_native_wrapper
666 address func = native_function();
667 return (func != NULL && func != SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
668 }
669
670
671 void Method::clear_native_function() {
672 // Note: is_method_handle_intrinsic() is allowed here.
673 set_native_function(
674 SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
675 !native_bind_event_is_interesting);
676 clear_code();
677 }
678
679 address Method::critical_native_function() {
680 methodHandle mh(this);
681 return NativeLookup::lookup_critical_entry(mh);
682 }
683
684
685 void Method::set_signature_handler(address handler) {
686 address* signature_handler = signature_handler_addr();
687 *signature_handler = handler;
688 }
689
690
691 bool Method::is_not_compilable(int comp_level) const {
692 if (number_of_breakpoints() > 0) {
693 return true;
694 }
695 if (is_method_handle_intrinsic()) {
696 return !is_synthetic(); // the generated adapters must be compiled
697 }
698 if (comp_level == CompLevel_any) {
699 return is_not_c1_compilable() || is_not_c2_compilable();
700 }
701 if (is_c1_compile(comp_level)) {
702 return is_not_c1_compilable();
703 }
704 if (is_c2_compile(comp_level)) {
705 return is_not_c2_compilable();
706 }
707 return false;
708 }
709
710 // call this when compiler finds that this method is not compilable
711 void Method::set_not_compilable(int comp_level, bool report) {
712 if (PrintCompilation && report) {
713 ttyLocker ttyl;
714 tty->print("made not compilable ");
715 this->print_short_name(tty);
716 int size = this->code_size();
717 if (size > 0)
718 tty->print(" (%d bytes)", size);
719 tty->cr();
720 }
721 if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
722 ttyLocker ttyl;
723 xtty->begin_elem("make_not_compilable thread='%d'", (int) os::current_thread_id());
724 xtty->method(this);
725 xtty->stamp();
726 xtty->end_elem();
727 }
728 if (comp_level == CompLevel_all) {
729 set_not_c1_compilable();
730 set_not_c2_compilable();
731 } else {
732 if (is_c1_compile(comp_level)) {
733 set_not_c1_compilable();
734 } else
735 if (is_c2_compile(comp_level)) {
736 set_not_c2_compilable();
737 }
738 }
739 CompilationPolicy::policy()->disable_compilation(this);
740 }
741
742 // Revert to using the interpreter and clear out the nmethod
743 void Method::clear_code() {
744
745 // The adapter may be NULL if c2i adapters have not been made yet.
746 // This should only happen at allocation time.
747 if (_adapter == NULL) {
748 _from_compiled_entry = NULL;
749 } else {
750 _from_compiled_entry = _adapter->get_c2i_entry();
751 }
752 OrderAccess::storestore();
753 _from_interpreted_entry = _i2i_entry;
754 OrderAccess::storestore();
755 _code = NULL;
756 }
757
758 // Called by class data sharing to remove any entry points (which are not shared)
759 void Method::unlink_method() {
760 _code = NULL;
761 _i2i_entry = NULL;
762 _from_interpreted_entry = NULL;
763 if (is_native()) {
764 *native_function_addr() = NULL;
765 set_signature_handler(NULL);
766 }
767 NOT_PRODUCT(set_compiled_invocation_count(0);)
768 invocation_counter()->reset();
769 backedge_counter()->reset();
770 _adapter = NULL;
771 _from_compiled_entry = NULL;
772 assert(_method_data == NULL, "unexpected method data?");
773 set_method_data(NULL);
774 set_interpreter_throwout_count(0);
775 set_interpreter_invocation_count(0);
776 }
777
778 // Called when the method_holder is getting linked. Setup entrypoints so the method
779 // is ready to be called from interpreter, compiler, and vtables.
780 void Method::link_method(methodHandle h_method, TRAPS) {
781 // If the code cache is full, we may reenter this function for the
782 // leftover methods that weren't linked.
783 if (_i2i_entry != NULL) return;
784
785 assert(_adapter == NULL, "init'd to NULL" );
786 assert( _code == NULL, "nothing compiled yet" );
787
788 // Setup interpreter entrypoint
789 assert(this == h_method(), "wrong h_method()" );
790 address entry = Interpreter::entry_for_method(h_method);
791 assert(entry != NULL, "interpreter entry must be non-null");
792 // Sets both _i2i_entry and _from_interpreted_entry
793 set_interpreter_entry(entry);
794 if (is_native() && !is_method_handle_intrinsic()) {
795 set_native_function(
796 SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
797 !native_bind_event_is_interesting);
798 }
799
800 // Setup compiler entrypoint. This is made eagerly, so we do not need
801 // special handling of vtables. An alternative is to make adapters more
802 // lazily by calling make_adapter() from from_compiled_entry() for the
803 // normal calls. For vtable calls life gets more complicated. When a
804 // call-site goes mega-morphic we need adapters in all methods which can be
805 // called from the vtable. We need adapters on such methods that get loaded
806 // later. Ditto for mega-morphic itable calls. If this proves to be a
807 // problem we'll make these lazily later.
808 (void) make_adapters(h_method, CHECK);
809
810 // ONLY USE h_method from here on, as make_adapters may have blocked
811
812 }
813
814 address Method::make_adapters(methodHandle mh, TRAPS) {
815 // Adapters for compiled code are made eagerly here. They are fairly
816 // small (generally < 100 bytes) and quick to make (and cached and shared)
817 // so making them eagerly shouldn't be too expensive.
818 AdapterHandlerEntry* adapter = AdapterHandlerLibrary::get_adapter(mh);
819 if (adapter == NULL ) {
820 THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(), "out of space in CodeCache for adapters");
821 }
822
823 mh->set_adapter_entry(adapter);
824 mh->_from_compiled_entry = adapter->get_c2i_entry();
825 return adapter->get_c2i_entry();
826 }
827
828 // The verified_code_entry() must be called when an invoke is resolved
829 // on this method.
830
831 // It returns the compiled code entry point, after asserting not null.
832 // This function is called after potential safepoints so that the nmethod
833 // or adapter it points to is still live and valid.
834 // This function must not hit a safepoint!
835 address Method::verified_code_entry() {
836 debug_only(No_Safepoint_Verifier nsv;)
837 nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
838 if (code == NULL && UseCodeCacheFlushing) {
839 nmethod *saved_code = CodeCache::find_and_remove_saved_code(this);
840 if (saved_code != NULL) {
841 methodHandle method(this);
842 assert( ! saved_code->is_osr_method(), "should not get here for osr" );
843 set_code( method, saved_code );
844 }
845 }
846
847 assert(_from_compiled_entry != NULL, "must be set");
848 return _from_compiled_entry;
849 }
850
851 // Check that if an nmethod ref exists, it has a backlink to this or no backlink at all
852 // (could be racing a deopt).
853 // Not inline to avoid circular ref.
854 bool Method::check_code() const {
855 // The field value is cached in a register or local; there's a race on the value of the field.
856 nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
857 return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method());
858 }
859
860 // Install compiled code. Once installed, it can execute immediately.
861 void Method::set_code(methodHandle mh, nmethod *code) {
862 assert( code, "use clear_code to remove code" );
863 assert( mh->check_code(), "" );
864
865 guarantee(mh->adapter() != NULL, "Adapter blob must already exist!");
866
867 // These writes must happen in this order, because the interpreter will
868 // directly jump to from_interpreted_entry which jumps to an i2c adapter
869 // which jumps to _from_compiled_entry.
870 mh->_code = code; // Assign before allowing compiled code to exec
871
872 int comp_level = code->comp_level();
873 // In theory there could be a race here. In practice it is unlikely
874 // and not worth worrying about.
875 if (comp_level > mh->highest_comp_level()) {
876 mh->set_highest_comp_level(comp_level);
877 }
878
879 OrderAccess::storestore();
880 #ifdef SHARK
881 mh->_from_interpreted_entry = code->insts_begin();
882 #else //!SHARK
883 mh->_from_compiled_entry = code->verified_entry_point();
884 OrderAccess::storestore();
885 // Instantly compiled code can execute.
886 if (!mh->is_method_handle_intrinsic())
887 mh->_from_interpreted_entry = mh->get_i2c_entry();
888 #endif //!SHARK
889 }
890
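// A portable C++11 analogy (not VM code, names hypothetical) of the ordering
// above: a release-publication pattern. Everything the new entry point
// depends on is initialized first; the pointer the interpreter jumps through
// is published last, behind a release fence.
#include <atomic>
struct PublishSketch {
  void* payload;                  // stands in for mh->_code
  std::atomic<void*> entry;       // stands in for _from_interpreted_entry
};
static void publish(PublishSketch& p, void* code, void* i2c_entry) {
  p.payload = code;                                       // plain store first
  p.entry.store(i2c_entry, std::memory_order_release);    // publish last
}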
891
892 bool Method::is_overridden_in(Klass* k) const {
893 InstanceKlass* ik = InstanceKlass::cast(k);
894
895 if (ik->is_interface()) return false;
896
897 // If the method's holder is an interface, we skip it - except if it
898 // is a miranda method
899 if (InstanceKlass::cast(method_holder())->is_interface()) {
900 // Check that method is not a miranda method
901 if (ik->lookup_method(name(), signature()) == NULL) {
902 // No implementation exists - so it is a miranda method
903 return false;
904 }
905 return true;
906 }
907
908 assert(ik->is_subclass_of(method_holder()), "should be subklass");
909 assert(ik->vtable() != NULL, "vtable should exist");
910 if (vtable_index() == nonvirtual_vtable_index) {
911 return false;
912 } else {
913 Method* vt_m = ik->method_at_vtable(vtable_index());
914 return vt_m != this;
915 }
916 }
917
918
919 // give advice about whether this Method* should be cached or not
920 bool Method::should_not_be_cached() const {
921 if (is_old()) {
922 // This method has been redefined. It is either EMCP or obsolete
923 // and we don't want to cache it because that would pin the method
924 // down and prevent it from being collectible if and when it
925 // finishes executing.
926 return true;
927 }
928
929 // caching this method should be just fine
930 return false;
931 }
932
933 // Constant pool structure for invoke methods:
934 enum {
935 _imcp_invoke_name = 1, // utf8: 'invokeExact', etc.
936 _imcp_invoke_signature, // utf8: (variable Symbol*)
937 _imcp_limit
938 };
939
940 // Test if this method is an MH adapter frame generated by Java code.
941 // Cf. java/lang/invoke/InvokerBytecodeGenerator
942 bool Method::is_compiled_lambda_form() const {
943 return intrinsic_id() == vmIntrinsics::_compiledLambdaForm;
944 }
945
946 // Test if this method is an internal MH primitive method.
947 bool Method::is_method_handle_intrinsic() const {
948 vmIntrinsics::ID iid = intrinsic_id();
949 return (MethodHandles::is_signature_polymorphic(iid) &&
950 MethodHandles::is_signature_polymorphic_intrinsic(iid));
951 }
952
953 bool Method::has_member_arg() const {
954 vmIntrinsics::ID iid = intrinsic_id();
955 return (MethodHandles::is_signature_polymorphic(iid) &&
956 MethodHandles::has_member_arg(iid));
957 }
958
959 // Make an instance of a signature-polymorphic internal MH primitive.
960 methodHandle Method::make_method_handle_intrinsic(vmIntrinsics::ID iid,
961 Symbol* signature,
962 TRAPS) {
963 ResourceMark rm;
964 methodHandle empty;
965
966 KlassHandle holder = SystemDictionary::MethodHandle_klass();
967 Symbol* name = MethodHandles::signature_polymorphic_intrinsic_name(iid);
968 assert(iid == MethodHandles::signature_polymorphic_name_id(name), "");
969 if (TraceMethodHandles) {
970 tty->print_cr("make_method_handle_intrinsic MH.%s%s", name->as_C_string(), signature->as_C_string());
971 }
972
973 // invariant: cp->symbol_at_put is preceded by a refcount increment (more usually a lookup)
974 name->increment_refcount();
975 signature->increment_refcount();
976
977 int cp_length = _imcp_limit;
978 ClassLoaderData* loader_data = holder->class_loader_data();
979 constantPoolHandle cp;
980 {
981 ConstantPool* cp_oop = ConstantPool::allocate(loader_data, cp_length, CHECK_(empty));
982 cp = constantPoolHandle(THREAD, cp_oop);
983 }
984 cp->set_pool_holder(holder());
985 cp->symbol_at_put(_imcp_invoke_name, name);
986 cp->symbol_at_put(_imcp_invoke_signature, signature);
987 cp->set_preresolution();
988
989 // decide on access bits: public or not?
990 int flags_bits = (JVM_ACC_NATIVE | JVM_ACC_SYNTHETIC | JVM_ACC_FINAL);
991 bool must_be_static = MethodHandles::is_signature_polymorphic_static(iid);
992 if (must_be_static) flags_bits |= JVM_ACC_STATIC;
993 assert((flags_bits & JVM_ACC_PUBLIC) == 0, "do not expose these methods");
994
995 methodHandle m;
996 {
997 Method* m_oop = Method::allocate(loader_data, 0, accessFlags_from(flags_bits),
998 0, 0, 0, 0, CHECK_(empty));
999 m = methodHandle(THREAD, m_oop);
1000 }
1001 m->set_constants(cp());
1002 m->set_name_index(_imcp_invoke_name);
1003 m->set_signature_index(_imcp_invoke_signature);
1004 assert(MethodHandles::is_signature_polymorphic_name(m->name()), "");
1005 assert(m->signature() == signature, "");
1006 #ifdef CC_INTERP
1007 ResultTypeFinder rtf(signature);
1008 m->set_result_index(rtf.type());
1009 #endif
1010 m->compute_size_of_parameters(THREAD);
1011 m->init_intrinsic_id();
1012 assert(m->is_method_handle_intrinsic(), "");
1013 #ifdef ASSERT
1014 if (!MethodHandles::is_signature_polymorphic(m->intrinsic_id())) m->print();
1015 assert(MethodHandles::is_signature_polymorphic(m->intrinsic_id()), "must be an invoker");
1016 assert(m->intrinsic_id() == iid, "correctly predicted iid");
1017 #endif //ASSERT
1018
1019 // Finally, set up its entry points.
1020 assert(m->can_be_statically_bound(), "");
1021 m->set_vtable_index(Method::nonvirtual_vtable_index);
1022 m->link_method(m, CHECK_(empty));
1023
1024 if (TraceMethodHandles && (Verbose || WizardMode))
1025 m->print_on(tty);
1026
1027 return m;
1028 }
1029
1030 Klass* Method::check_non_bcp_klass(Klass* klass) {
1031 if (klass != NULL && Klass::cast(klass)->class_loader() != NULL) {
1032 if (Klass::cast(klass)->oop_is_objArray())
1033 klass = objArrayKlass::cast(klass)->bottom_klass();
1034 return klass;
1035 }
1036 return NULL;
1037 }
1038
1039
1040 methodHandle Method::clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
1041 u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) {
1042 // Code below does not work for native methods - they should never get rewritten anyway
1043 assert(!m->is_native(), "cannot rewrite native methods");
1044 // Allocate new Method*
1045 AccessFlags flags = m->access_flags();
1046 int checked_exceptions_len = m->checked_exceptions_length();
1047 int localvariable_len = m->localvariable_table_length();
1048 int exception_table_len = m->exception_table_length();
1049
1050 ClassLoaderData* loader_data = m()->method_holder()->class_loader_data();
1051 Method* newm_oop = Method::allocate(loader_data,
1052 new_code_length,
1053 flags,
1054 new_compressed_linenumber_size,
1055 localvariable_len,
1056 exception_table_len,
1057 checked_exceptions_len,
1058 CHECK_(methodHandle()));
1059 methodHandle newm (THREAD, newm_oop);
1060 int new_method_size = newm->method_size();
1061
1062 // Create a shallow copy of Method part, but be careful to preserve the new ConstMethod*
1063 ConstMethod* newcm = newm->constMethod();
1064 int new_const_method_size = newm->constMethod()->size();
1065
1066 memcpy(newm(), m(), sizeof(Method));
1067
1068 // Create shallow copy of ConstMethod.
1069 memcpy(newcm, m->constMethod(), sizeof(ConstMethod));
1070
1071 // Reset correct method/const method, method size, and parameter info
1072 newm->set_constMethod(newcm);
1073 newm->constMethod()->set_code_size(new_code_length);
1074 newm->constMethod()->set_constMethod_size(new_const_method_size);
1075 newm->set_method_size(new_method_size);
1076 assert(newm->code_size() == new_code_length, "check");
1077 assert(newm->checked_exceptions_length() == checked_exceptions_len, "check");
1078 assert(newm->exception_table_length() == exception_table_len, "check");
1079 assert(newm->localvariable_table_length() == localvariable_len, "check");
1080 // Copy new byte codes
1081 memcpy(newm->code_base(), new_code, new_code_length);
1082 // Copy line number table
1083 if (new_compressed_linenumber_size > 0) {
1084 memcpy(newm->compressed_linenumber_table(),
1085 new_compressed_linenumber_table,
1086 new_compressed_linenumber_size);
1087 }
1088 // Copy checked_exceptions
1089 if (checked_exceptions_len > 0) {
1090 memcpy(newm->checked_exceptions_start(),
1091 m->checked_exceptions_start(),
1092 checked_exceptions_len * sizeof(CheckedExceptionElement));
1093 }
1094 // Copy exception table
1095 if (exception_table_len > 0) {
1096 memcpy(newm->exception_table_start(),
1097 m->exception_table_start(),
1098 exception_table_len * sizeof(ExceptionTableElement));
1099 }
1100 // Copy local variable number table
1101 if (localvariable_len > 0) {
1102 memcpy(newm->localvariable_table_start(),
1103 m->localvariable_table_start(),
1104 localvariable_len * sizeof(LocalVariableTableElement));
1105 }
1106 // Copy stackmap table
1107 if (m->has_stackmap_table()) {
1108 int code_attribute_length = m->stackmap_data()->length();
1109 Array<u1>* stackmap_data =
1110 MetadataFactory::new_array<u1>(loader_data, code_attribute_length, 0, CHECK_NULL);
1111 memcpy((void*)stackmap_data->adr_at(0),
1112 (void*)m->stackmap_data()->adr_at(0), code_attribute_length);
1113 newm->set_stackmap_data(stackmap_data);
1114 }
1115
1116 return newm;
1117 }
1118
1119 vmSymbols::SID Method::klass_id_for_intrinsics(Klass* holder) {
1120 // if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
1121 // because we are not loading from core libraries
1122 if (InstanceKlass::cast(holder)->class_loader() != NULL)
1123 return vmSymbols::NO_SID; // regardless of name, no intrinsics here
1124
1125 // see if the klass name is well-known:
1126 Symbol* klass_name = InstanceKlass::cast(holder)->name();
1127 return vmSymbols::find_sid(klass_name);
1128 }
1129
1130 void Method::init_intrinsic_id() {
1131 assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
1132 const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
1133 assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_id_uint, "else fix size");
1134 assert(intrinsic_id_size_in_bytes() == sizeof(_intrinsic_id), "");
1135
1136 // the klass name is well-known:
1137 vmSymbols::SID klass_id = klass_id_for_intrinsics(method_holder());
1138 assert(klass_id != vmSymbols::NO_SID, "caller responsibility");
1139
1140 // ditto for method and signature:
1141 vmSymbols::SID name_id = vmSymbols::find_sid(name());
1142 if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle)
1143 && name_id == vmSymbols::NO_SID)
1144 return;
1145 vmSymbols::SID sig_id = vmSymbols::find_sid(signature());
1146 if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle)
1147 && sig_id == vmSymbols::NO_SID) return;
1148 jshort flags = access_flags().as_short();
1149
1150 vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
1151 if (id != vmIntrinsics::_none) {
1152 set_intrinsic_id(id);
1153 return;
1154 }
1155
1156 // A few slightly irregular cases:
1157 switch (klass_id) {
1158 case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_StrictMath):
1159 // Second chance: check in regular Math.
1160 switch (name_id) {
1161 case vmSymbols::VM_SYMBOL_ENUM_NAME(min_name):
1162 case vmSymbols::VM_SYMBOL_ENUM_NAME(max_name):
1163 case vmSymbols::VM_SYMBOL_ENUM_NAME(sqrt_name):
1164 // pretend it is the corresponding method in the non-strict class:
1165 klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_Math);
1166 id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
1167 break;
1168 }
1169 break;
1170
1171 // Signature-polymorphic methods: MethodHandle.invoke*, InvokeDynamic.*.
1172 case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle):
1173 if (!is_native()) break;
1174 id = MethodHandles::signature_polymorphic_name_id(method_holder(), name());
1175 if (is_static() != MethodHandles::is_signature_polymorphic_static(id))
1176 id = vmIntrinsics::_none;
1177 break;
1178 }
1179
1180 if (id != vmIntrinsics::_none) {
1181 // Set up its iid. It is an alias method.
1182 set_intrinsic_id(id);
1183 return;
1184 }
1185 }
1186
1187 // These two methods are static since a GC may move the Method
1188 bool Method::load_signature_classes(methodHandle m, TRAPS) {
1189 if (THREAD->is_Compiler_thread()) {
1190 // There is nothing useful this routine can do from within the compiler thread.
1191 // Hopefully, the signature contains only well-known classes.
1192 // We could scan for this and return true/false, but the caller won't care.
1193 return false;
1194 }
1195 bool sig_is_loaded = true;
1196 Handle class_loader(THREAD, InstanceKlass::cast(m->method_holder())->class_loader());
1197 Handle protection_domain(THREAD, Klass::cast(m->method_holder())->protection_domain());
1198 ResourceMark rm(THREAD);
1199 Symbol* signature = m->signature();
1200 for(SignatureStream ss(signature); !ss.is_done(); ss.next()) {
1201 if (ss.is_object()) {
1202 Symbol* sym = ss.as_symbol(CHECK_(false));
1203 Symbol* name = sym;
1204 Klass* klass = SystemDictionary::resolve_or_null(name, class_loader,
1205 protection_domain, THREAD);
1206 // We are loading classes eagerly. If a ClassNotFoundException or
1207 // a LinkageError was generated, be sure to ignore it.
1208 if (HAS_PENDING_EXCEPTION) {
1209 if (PENDING_EXCEPTION->is_a(SystemDictionary::ClassNotFoundException_klass()) ||
1210 PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
1211 CLEAR_PENDING_EXCEPTION;
1212 } else {
1213 return false;
1214 }
1215 }
1216 if( klass == NULL) { sig_is_loaded = false; }
1217 }
1218 }
1219 return sig_is_loaded;
1220 }
1221
1222 bool Method::has_unloaded_classes_in_signature(methodHandle m, TRAPS) {
1223 Handle class_loader(THREAD, InstanceKlass::cast(m->method_holder())->class_loader());
1224 Handle protection_domain(THREAD, Klass::cast(m->method_holder())->protection_domain());
1225 ResourceMark rm(THREAD);
1226 Symbol* signature = m->signature();
1227 for(SignatureStream ss(signature); !ss.is_done(); ss.next()) {
1228 if (ss.type() == T_OBJECT) {
1229 Symbol* name = ss.as_symbol_or_null();
1230 if (name == NULL) return true;
1231 Klass* klass = SystemDictionary::find(name, class_loader, protection_domain, THREAD);
1232 if (klass == NULL) return true;
1233 }
1234 }
1235 return false;
1236 }
1237
1238 // Exposed so field engineers can debug the VM
1239 void Method::print_short_name(outputStream* st) {
1240 ResourceMark rm;
1241 #ifdef PRODUCT
1242 st->print(" %s::", method_holder()->external_name());
1243 #else
1244 st->print(" %s::", method_holder()->internal_name());
1245 #endif
1246 name()->print_symbol_on(st);
1247 if (WizardMode) signature()->print_symbol_on(st);
1248 else if (MethodHandles::is_signature_polymorphic(intrinsic_id()))
1249 MethodHandles::print_as_basic_type_signature_on(st, signature(), true);
1250 }
1251
1252 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
1253 static void reorder_based_on_method_index(Array<Method*>* methods,
1254 Array<AnnotationArray*>* annotations,
1255 GrowableArray<AnnotationArray*>* temp_array) {
1256 if (annotations == NULL) {
1257 return;
1258 }
1259
1260 int length = methods->length();
1261 int i;
1262 // Copy to temp array
1263 temp_array->clear();
1264 for (i = 0; i < length; i++) {
1265 temp_array->append(annotations->at(i));
1266 }
1267
1268 // Copy back using old method indices
1269 for (i = 0; i < length; i++) {
1270 Method* m = methods->at(i);
1271 annotations->at_put(i, temp_array->at(m->method_idnum()));
1272 }
1273 }
1274
1275 // Comparator for sorting an object array containing
1276 // Method*s.
1277 static int method_comparator(Method* a, Method* b) {
1278 return a->name()->fast_compare(b->name());
1279 }
1280
1281 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
1282 void Method::sort_methods(Array<Method*>* methods,
1283 Array<AnnotationArray*>* methods_annotations,
1284 Array<AnnotationArray*>* methods_parameter_annotations,
1285 Array<AnnotationArray*>* methods_default_annotations,
1286 bool idempotent) {
1287 int length = methods->length();
1288 if (length > 1) {
1289 bool do_annotations = false;
1290 if (methods_annotations != NULL ||
1291 methods_parameter_annotations != NULL ||
1292 methods_default_annotations != NULL) {
1293 do_annotations = true;
1294 }
1295 if (do_annotations) {
1296 // Remember current method ordering so we can reorder annotations
1297 for (int i = 0; i < length; i++) {
1298 Method* m = methods->at(i);
1299 m->set_method_idnum(i);
1300 }
1301 }
1302 {
1303 No_Safepoint_Verifier nsv;
1304 QuickSort::sort<Method*>(methods->data(), length, method_comparator, idempotent);
1305 }
1306
1307 // Sort annotations if necessary
1308 assert(methods_annotations == NULL || methods_annotations->length() == methods->length(), "");
1309 assert(methods_parameter_annotations == NULL || methods_parameter_annotations->length() == methods->length(), "");
1310 assert(methods_default_annotations == NULL || methods_default_annotations->length() == methods->length(), "");
1311 if (do_annotations) {
1312 ResourceMark rm;
1313 // Allocate temporary storage
1314 GrowableArray<AnnotationArray*>* temp_array = new GrowableArray<AnnotationArray*>(length);
1315 reorder_based_on_method_index(methods, methods_annotations, temp_array);
1316 reorder_based_on_method_index(methods, methods_parameter_annotations, temp_array);
1317 reorder_based_on_method_index(methods, methods_default_annotations, temp_array);
1318 }
1319
1320 // Reset method ordering
1321 for (int i = 0; i < length; i++) {
1322 Method* m = methods->at(i);
1323 m->set_method_idnum(i);
1324 }
1325 }
1326 }
1327
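// A standalone sketch of the reorder step above (int stands in for
// AnnotationArray*; names are hypothetical). Each method recorded its
// pre-sort position in method_idnum, so after the sort, slot i must receive
// the annotation that lived at that old position.
#include <vector>
static void reorder_sketch(std::vector<int>& annotations,
                           const std::vector<int>& old_index_of_slot) {
  std::vector<int> temp(annotations);              // snapshot in pre-sort order
  for (size_t i = 0; i < annotations.size(); i++) {
    annotations[i] = temp[old_index_of_slot[i]];   // old_index_of_slot[i] == methods->at(i)->method_idnum()
  }
}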
1328
1329 //-----------------------------------------------------------------------------------
1330 // Non-product code
1331
1332 #ifndef PRODUCT
1333 class SignatureTypePrinter : public SignatureTypeNames {
1334 private:
1335 outputStream* _st;
1336 bool _use_separator;
1337
1338 void type_name(const char* name) {
1339 if (_use_separator) _st->print(", ");
1340 _st->print(name);
1341 _use_separator = true;
1342 }
1343
1344 public:
1345 SignatureTypePrinter(Symbol* signature, outputStream* st) : SignatureTypeNames(signature) {
1346 _st = st;
1347 _use_separator = false;
1348 }
1349
1350 void print_parameters() { _use_separator = false; iterate_parameters(); }
1351 void print_returntype() { _use_separator = false; iterate_returntype(); }
1352 };
1353
1354
1355 void Method::print_name(outputStream* st) {
1356 Thread *thread = Thread::current();
1357 ResourceMark rm(thread);
1358 SignatureTypePrinter sig(signature(), st);
1359 st->print("%s ", is_static() ? "static" : "virtual");
1360 sig.print_returntype();
1361 st->print(" %s.", method_holder()->internal_name());
1362 name()->print_symbol_on(st);
1363 st->print("(");
1364 sig.print_parameters();
1365 st->print(")");
1366 }
1367
1368
1369 void Method::print_codes_on(outputStream* st) const {
1370 print_codes_on(0, code_size(), st);
1371 }
1372
1373 void Method::print_codes_on(int from, int to, outputStream* st) const {
1374 Thread *thread = Thread::current();
1375 ResourceMark rm(thread);
1376 methodHandle mh (thread, (Method*)this);
1377 BytecodeStream s(mh);
1378 s.set_interval(from, to);
1379 BytecodeTracer::set_closure(BytecodeTracer::std_closure());
1380 while (s.next() >= 0) BytecodeTracer::trace(mh, s.bcp(), st);
1381 }
1382 #endif // not PRODUCT
1383
1384
1385 // Simple compression of line number tables. We use a regular compressed stream, except that we compress deltas
1386 // between (bci,line) pairs since they are smaller. If (bci delta, line delta) fits in (5-bit unsigned, 3-bit unsigned)
1387 // we save it as one byte, otherwise we write a 0xFF escape character and use regular compression. 0x0 is used
1388 // as end-of-stream terminator.
1389
1390 void CompressedLineNumberWriteStream::write_pair_regular(int bci_delta, int line_delta) {
1391 // The bci and line number do not compress into a single byte.
1392 // Write out escape character and use regular compression for bci and line number.
1393 write_byte((jubyte)0xFF);
1394 write_signed_int(bci_delta);
1395 write_signed_int(line_delta);
1396 }
1397
1398 // See comment in method.hpp which explains why this exists.
1399 #if defined(_M_AMD64) && _MSC_VER >= 1400
1400 #pragma optimize("", off)
1401 void CompressedLineNumberWriteStream::write_pair(int bci, int line) {
1402 write_pair_inline(bci, line);
1403 }
1404 #pragma optimize("", on)
1405 #endif
1406
1407 CompressedLineNumberReadStream::CompressedLineNumberReadStream(u_char* buffer) : CompressedReadStream(buffer) {
1408 _bci = 0;
1409 _line = 0;
1410 };
1411
1412
1413 bool CompressedLineNumberReadStream::read_pair() {
1414 jubyte next = read_byte();
1415 // Check for terminator
1416 if (next == 0) return false;
1417 if (next == 0xFF) {
1418 // Escape character, regular compression used
1419 _bci += read_signed_int();
1420 _line += read_signed_int();
1421 } else {
1422 // Single byte compression used
1423 _bci += next >> 3;
1424 _line += next & 0x7;
1425 }
1426 return true;
1427 }
1428
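// A sketch (hypothetical helpers, not VM code) of the single-byte encoding
// above, round-tripped in isolation. A pair packs iff both deltas are
// non-negative, bci_delta < 32 (5 bits) and line_delta < 8 (3 bits), and the
// packed byte is neither 0x00 (terminator) nor 0xFF (escape).
static bool pack_pair(int bci_delta, int line_delta, unsigned char* out) {
  if ((bci_delta & ~0x1F) != 0 || (line_delta & ~0x7) != 0) {
    return false;                    // caller falls back to the 0xFF escape form
  }
  unsigned char value = (unsigned char)((bci_delta << 3) | line_delta);
  if (value == 0x00 || value == 0xFF) return false;  // reserved byte values
  *out = value;
  return true;
}
static void unpack_pair(unsigned char b, int* bci_delta, int* line_delta) {
  *bci_delta = b >> 3;               // upper 5 bits
  *line_delta = b & 0x7;             // lower 3 bits
}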
1429
1430 Bytecodes::Code Method::orig_bytecode_at(int bci) const {
1431 BreakpointInfo* bp = InstanceKlass::cast(method_holder())->breakpoints();
1432 for (; bp != NULL; bp = bp->next()) {
1433 if (bp->match(this, bci)) {
1434 return bp->orig_bytecode();
1435 }
1436 }
1437 ShouldNotReachHere();
1438 return Bytecodes::_shouldnotreachhere;
1439 }
1440
1441 void Method::set_orig_bytecode_at(int bci, Bytecodes::Code code) {
1442 assert(code != Bytecodes::_breakpoint, "cannot patch breakpoints this way");
1443 BreakpointInfo* bp = InstanceKlass::cast(method_holder())->breakpoints();
1444 for (; bp != NULL; bp = bp->next()) {
1445 if (bp->match(this, bci)) {
1446 bp->set_orig_bytecode(code);
1447 // and continue, in case there is more than one
1448 }
1449 }
1450 }
1451
1452 void Method::set_breakpoint(int bci) {
1453 InstanceKlass* ik = InstanceKlass::cast(method_holder());
1454 BreakpointInfo *bp = new BreakpointInfo(this, bci);
1455 bp->set_next(ik->breakpoints());
1456 ik->set_breakpoints(bp);
1457 // do this last:
1458 bp->set(this);
1459 }
1460
1461 static void clear_matches(Method* m, int bci) {
1462 InstanceKlass* ik = InstanceKlass::cast(m->method_holder());
1463 BreakpointInfo* prev_bp = NULL;
1464 BreakpointInfo* next_bp;
1465 for (BreakpointInfo* bp = ik->breakpoints(); bp != NULL; bp = next_bp) {
1466 next_bp = bp->next();
1467 // A bci value of -1 is used to delete all breakpoints in method m (e.g., clear_all_breakpoints).
1468 if (bci >= 0 ? bp->match(m, bci) : bp->match(m)) {
1469 // do this first:
1470 bp->clear(m);
1471 // unhook it
1472 if (prev_bp != NULL)
1473 prev_bp->set_next(next_bp);
1474 else
1475 ik->set_breakpoints(next_bp);
1476 delete bp;
1477 // When a class is redefined, JVMTI sets a breakpoint in all versions of EMCP methods
1478 // at the same location. So we can have multiple matching (method_index and bci)
1479 // BreakpointInfo nodes in the BreakpointInfo list. For a clear_breakpoint request
1480 // we should delete just one breakpoint and keep the BreakpointInfo nodes of all
1481 // other method versions for future clear_breakpoint requests.
1482 // A bci value of -1 is used to clear all breakpoints (see clear_all_breakpoints),
1483 // which is called when the class is unloaded. In that case we delete the breakpoint
1484 // information for all versions of the method. We may not correctly restore the
1485 // original bytecode in every method version, but that is OK because the class is
1486 // being unloaded, so these methods won't be used anymore.
1487 if (bci >= 0) {
1488 break;
1489 }
1490 } else {
1491 // This one is a keeper.
1492 prev_bp = bp;
1493 }
1494 }
1495 }
1496
1497 void Method::clear_breakpoint(int bci) {
1498 assert(bci >= 0, "");
1499 clear_matches(this, bci);
1500 }
1501
1502 void Method::clear_all_breakpoints() {
1503 clear_matches(this, -1);
1504 }
1505
1506
1507 int Method::invocation_count() {
1508 if (TieredCompilation) {
1509 MethodData* const mdo = method_data();
1510 if (invocation_counter()->carry() || ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) {
1511 return InvocationCounter::count_limit;
1512 } else {
1513 return invocation_counter()->count() + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0);
1514 }
1515 } else {
1516 return invocation_counter()->count();
1517 }
1518 }
1519
1520 int Method::backedge_count() {
1521 if (TieredCompilation) {
1522 MethodData* const mdo = method_data();
1523 if (backedge_counter()->carry() || ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) {
1524 return InvocationCounter::count_limit;
1525 } else {
1526 return backedge_counter()->count() + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0);
1527 }
1528 } else {
1529 return backedge_counter()->count();
1530 }
1531 }
1532
1533 int Method::highest_comp_level() const {
1534 MethodData* mdo = method_data();
1535 if (mdo != NULL) {
1536 return mdo->highest_comp_level();
1537 } else {
1538 return CompLevel_none;
1539 }
1540 }
1541
1542 int Method::highest_osr_comp_level() const {
1543 MethodData* mdo = method_data();
1544 if (mdo != NULL) {
1545 return mdo->highest_osr_comp_level();
1546 } else {
1547 return CompLevel_none;
1548 }
1549 }
1550
1551 void Method::set_highest_comp_level(int level) {
1552 MethodData* mdo = method_data();
1553 if (mdo != NULL) {
1554 mdo->set_highest_comp_level(level);
1555 }
1556 }
1557
1558 void Method::set_highest_osr_comp_level(int level) {
1559 MethodData* mdo = method_data();
1560 if (mdo != NULL) {
1561 mdo->set_highest_osr_comp_level(level);
1562 }
1563 }
1564
1565 BreakpointInfo::BreakpointInfo(Method* m, int bci) {
1566 _bci = bci;
1567 _name_index = m->name_index();
1568 _signature_index = m->signature_index();
1569 _orig_bytecode = (Bytecodes::Code) *m->bcp_from(_bci);
1570 if (_orig_bytecode == Bytecodes::_breakpoint)
1571 _orig_bytecode = m->orig_bytecode_at(_bci);
1572 _next = NULL;
1573 }
1574
1575 void BreakpointInfo::set(Method* method) {
1576 #ifdef ASSERT
1577 {
1578 Bytecodes::Code code = (Bytecodes::Code) *method->bcp_from(_bci);
1579 if (code == Bytecodes::_breakpoint)
1580 code = method->orig_bytecode_at(_bci);
1581 assert(orig_bytecode() == code, "original bytecode must be the same");
1582 }
1583 #endif
1584 *method->bcp_from(_bci) = Bytecodes::_breakpoint;
1585 method->incr_number_of_breakpoints();
1586 SystemDictionary::notice_modification();
1587 {
1588 // Deoptimize all dependents on this method
1589 Thread *thread = Thread::current();
1590 HandleMark hm(thread);
1591 methodHandle mh(thread, method);
1592 Universe::flush_dependents_on_method(mh);
1593 }
1594 }
1595
1596 void BreakpointInfo::clear(Method* method) {
1597 *method->bcp_from(_bci) = orig_bytecode();
1598 assert(method->number_of_breakpoints() > 0, "must not go negative");
1599 method->decr_number_of_breakpoints();
1600 }
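// Editorial sketch (not part of this file): set() and clear() implement a
// patch/restore protocol on the bytecode stream:
//
//   set():   *bcp_from(_bci) = Bytecodes::_breakpoint  (the original bytecode
//            was saved by the constructor), bump the holder's breakpoint
//            count, then deoptimize compiled code depending on this method.
//   clear(): *bcp_from(_bci) = orig_bytecode(), decrement the count.
//
// The "do this last" / "do this first" ordering in set_breakpoint() and
// clear_matches() keeps the list consistent with the patched bytecode: a
// BreakpointInfo is linked into the holder's list before the bytecode is
// patched, and the bytecode is restored before the node is unlinked.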
1601
1602 // jmethodID handling
1603
1604 // This is a block-allocating object, sort of like JNIHandleBlock, only a
1605 // lot simpler. There aren't many of these, they aren't long, they are rarely
1606 // deleted and so we can do some suboptimal things.
1607 // It's allocated on the CHeap because once we allocate a jmethodID, we can
1608 // never get rid of it.
1609 // It would be nice to be able to parameterize the number of methods for
1610 // the null_class_loader but then we'd have to turn this and ClassLoaderData
1611 // into templates.
1612
1613 // I feel like this brain-dead class should exist somewhere in the STL
1614
1615 class JNIMethodBlock : public CHeapObj<mtClass> {
1616 enum { number_of_methods = 8 };
1617
1618 Method* _methods[number_of_methods];
1619 int _top;
1620 JNIMethodBlock* _next;
1621 public:
1622 static Method* const _free_method;
1623
1624 JNIMethodBlock() : _top(0), _next(NULL) {
1625 for (int i = 0; i < number_of_methods; i++) _methods[i] = _free_method;
1626 }
1627
1628 Method** add_method(Method* m) {
1629 if (_top < number_of_methods) {
1630 // top points to the next free entry.
1631 int i = _top;
1632 _methods[i] = m;
1633 _top++;
1634 return &_methods[i];
1635 } else if (_top == number_of_methods) {
1636 // If the next free entry ran off the end of the block, see if there's a freed entry to reuse.
1637 for (int i = 0; i < number_of_methods; i++) {
1638 if (_methods[i] == _free_method) {
1639 _methods[i] = m;
1640 return &_methods[i];
1641 }
1642 }
1643 // Only check each block once for frees. They're very unlikely.
1644 // Increment top past the end of the block.
1645 _top++;
1646 }
1647 // need to allocate a next block.
1648 if (_next == NULL) {
1649 _next = new JNIMethodBlock();
1650 }
1651 return _next->add_method(m);
1652 }
1653
1654 bool contains(Method** m) {
1655 for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
1656 for (int i = 0; i < number_of_methods; i++) {
1657 if (&(b->_methods[i]) == m) {
1658 return true;
1659 }
1660 }
1661 }
1662 return false; // not found
1663 }
1664
1665 // Doesn't really destroy it, just marks it as free so it can be reused.
1666 void destroy_method(Method** m) {
1667 #ifdef ASSERT
1668 assert(contains(m), "should be a methodID");
1669 #endif // ASSERT
1670 *m = _free_method;
1671 }
1672
1673 // During class unloading the methods are cleared, which is different
1674 // from being freed.
1675 void clear_all_methods() {
1676 for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
1677 for (int i = 0; i < number_of_methods; i++) {
1678 b->_methods[i] = NULL;  // clear the slot in block b, not just in 'this'
1679 }
1680 }
1681 }
1682 #ifndef PRODUCT
1683 int count_methods() {
1684 // count all allocated methods
1685 int count = 0;
1686 for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
1687 for (int i = 0; i < number_of_methods; i++) {
1688 if (b->_methods[i] != _free_method) count++;  // inspect each block's slots, not just 'this'
1689 }
1690 }
1691 return count;
1692 }
1693 #endif // PRODUCT
1694 };
1695
1696 // Something that can't be mistaken for an address or a markOop
1697 Method* const JNIMethodBlock::_free_method = (Method*)55;
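// Editorial sketch (not part of this file): a jmethodID is the address of one
// of the Method* slots above, so resolving it is a double indirection and a
// freed ID shows up as the sentinel. Roughly:
//
//   Method* resolve(jmethodID mid) {
//     Method** slot = (Method**)mid;   // a jmethodID is really a Method**
//     Method* m = *slot;               // may be the sentinel or NULL
//     return (m == JNIMethodBlock::_free_method) ? NULL : m;
//   }
//
// Being odd, the value 55 can never be a properly aligned Method* (or markOop).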
1698
1699 // Add a method id to the jmethod_ids
1700 jmethodID Method::make_jmethod_id(ClassLoaderData* loader_data, Method* m) {
1701 ClassLoaderData* cld = loader_data;
1702
1703 if (!SafepointSynchronize::is_at_safepoint()) {
1704 // Have to add jmethod_ids() to class loader data thread-safely.
1705 // Also have to add the method to the list safely, which the cld lock
1706 // protects as well.
1707 MutexLockerEx ml(cld->metaspace_lock(), Mutex::_no_safepoint_check_flag);
1708 if (cld->jmethod_ids() == NULL) {
1709 cld->set_jmethod_ids(new JNIMethodBlock());
1710 }
1711 // jmethodID is a pointer to Method*
1712 return (jmethodID)cld->jmethod_ids()->add_method(m);
1713 } else {
1714 // At safepoint, we are single threaded and can set this.
1715 if (cld->jmethod_ids() == NULL) {
1716 cld->set_jmethod_ids(new JNIMethodBlock());
1717 }
1718 // jmethodID is a pointer to Method*
1719 return (jmethodID)cld->jmethod_ids()->add_method(m);
1720 }
1721 }
1722
1723 // Mark a jmethodID as free. This is called when there is a data race in
1724 // InstanceKlass while creating the jmethodID cache.
1725 void Method::destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID m) {
1726 ClassLoaderData* cld = loader_data;
1727 Method** ptr = (Method**)m;
1728 assert(cld->jmethod_ids() != NULL, "should have method handles");
1729 cld->jmethod_ids()->destroy_method(ptr);
1730 }
1731
1732 void Method::change_method_associated_with_jmethod_id(jmethodID jmid, Method* new_method) {
1733 // Can't assert the method_holder is the same because the new method has the
1734 // scratch method holder.
1735 assert(resolve_jmethod_id(jmid)->method_holder()->class_loader()
1736 == new_method->method_holder()->class_loader(),
1737 "changing to a different class loader");
1738 // Just change the method in place, jmethodID pointer doesn't change.
1739 *((Method**)jmid) = new_method;
1740 }
1741
1742 bool Method::is_method_id(jmethodID mid) {
1743 Method* m = resolve_jmethod_id(mid);
1744 assert(m != NULL, "should be called with non-null method");
1745 InstanceKlass* ik = InstanceKlass::cast(m->method_holder());
1746 ClassLoaderData* cld = ik->class_loader_data();
1747 if (cld->jmethod_ids() == NULL) return false;
1748 return (cld->jmethod_ids()->contains((Method**)mid));
1749 }
1750
1751 Method* Method::checked_resolve_jmethod_id(jmethodID mid) {
1752 if (mid == NULL) return NULL;
1753 Method* o = resolve_jmethod_id(mid);
1754 if (o == NULL || o == JNIMethodBlock::_free_method || !((Metadata*)o)->is_method()) {
1755 return NULL;
1756 }
1757 return o;
1758 }
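// Editorial sketch (not part of this file): callers that accept a jmethodID
// from agents or user code would typically go through the checked variant,
// which screens out NULL, freed, and non-method IDs before use:
//
//   Method* m = Method::checked_resolve_jmethod_id(mid);
//   if (m == NULL) {
//     // invalid ID: e.g. report JVMTI_ERROR_INVALID_METHODID to the caller
//   }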
1759
1760 void Method::set_on_stack(const bool value) {
1761 // Set both the method itself and its constant pool. The constant pool
1762 // on stack means some method referring to it is also on the stack.
1763 _access_flags.set_on_stack(value);
1764 constants()->set_on_stack(value);
1765 if (value) MetadataOnStackMark::record(this);
1766 }
1767
1768 // Called when the class loader is unloaded to make all methods weak.
1769 void Method::clear_jmethod_ids(ClassLoaderData* loader_data) {
1770 loader_data->jmethod_ids()->clear_all_methods();
1771 }
1772
1773 #ifndef PRODUCT
1774 void Method::print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) {
1775 out->print_cr("jni_method_id count = %d", loader_data->jmethod_ids()->count_methods());
1776 }
1777 #endif // PRODUCT
1778
1779
1780 // Printing
1781
1782 #ifndef PRODUCT
1783
1784 void Method::print_on(outputStream* st) const {
1785 ResourceMark rm;
1786 assert(is_method(), "must be method");
1787 st->print_cr("%s", internal_name());
1788 // get the effect of PrintOopAddress, always, for methods:
1789 st->print_cr(" - this oop: " INTPTR_FORMAT, (intptr_t)this);
1790 st->print (" - method holder: "); method_holder()->print_value_on(st); st->cr();
1791 st->print (" - constants: " INTPTR_FORMAT " ", (address)constants());
1792 constants()->print_value_on(st); st->cr();
1793 st->print (" - access: 0x%x ", access_flags().as_int()); access_flags().print_on(st); st->cr();
1794 st->print (" - name: "); name()->print_value_on(st); st->cr();
1795 st->print (" - signature: "); signature()->print_value_on(st); st->cr();
1796 st->print_cr(" - max stack: %d", max_stack());
1797 st->print_cr(" - max locals: %d", max_locals());
1798 st->print_cr(" - size of params: %d", size_of_parameters());
1799 st->print_cr(" - method size: %d", method_size());
1800 if (intrinsic_id() != vmIntrinsics::_none)
1801 st->print_cr(" - intrinsic id: %d %s", intrinsic_id(), vmIntrinsics::name_at(intrinsic_id()));
1802 if (highest_comp_level() != CompLevel_none)
1803 st->print_cr(" - highest level: %d", highest_comp_level());
1804 st->print_cr(" - vtable index: %d", _vtable_index);
1805 st->print_cr(" - i2i entry: " INTPTR_FORMAT, interpreter_entry());
1806 st->print( " - adapters: ");
1807 AdapterHandlerEntry* a = ((Method*)this)->adapter();
1808 if (a == NULL)
1809 st->print_cr(INTPTR_FORMAT, a);
1810 else
1811 a->print_adapter_on(st);
1812 st->print_cr(" - compiled entry " INTPTR_FORMAT, from_compiled_entry());
1813 st->print_cr(" - code size: %d", code_size());
1814 if (code_size() != 0) {
1815 st->print_cr(" - code start: " INTPTR_FORMAT, code_base());
1816 st->print_cr(" - code end (excl): " INTPTR_FORMAT, code_base() + code_size());
1817 }
1818 if (method_data() != NULL) {
1819 st->print_cr(" - method data: " INTPTR_FORMAT, (address)method_data());
1820 }
1821 st->print_cr(" - checked ex length: %d", checked_exceptions_length());
1822 if (checked_exceptions_length() > 0) {
1823 CheckedExceptionElement* table = checked_exceptions_start();
1824 st->print_cr(" - checked ex start: " INTPTR_FORMAT, table);
1825 if (Verbose) {
1826 for (int i = 0; i < checked_exceptions_length(); i++) {
1827 st->print_cr(" - throws %s", constants()->printable_name_at(table[i].class_cp_index));
1828 }
1829 }
1830 }
1831 if (has_linenumber_table()) {
1832 u_char* table = compressed_linenumber_table();
1833 st->print_cr(" - linenumber start: " INTPTR_FORMAT, table);
1834 if (Verbose) {
1835 CompressedLineNumberReadStream stream(table);
1836 while (stream.read_pair()) {
1837 st->print_cr(" - line %d: %d", stream.line(), stream.bci());
1838 }
1839 }
1840 }
1841 st->print_cr(" - localvar length: %d", localvariable_table_length());
1842 if (localvariable_table_length() > 0) {
1843 LocalVariableTableElement* table = localvariable_table_start();
1844 st->print_cr(" - localvar start: " INTPTR_FORMAT, table);
1845 if (Verbose) {
1846 for (int i = 0; i < localvariable_table_length(); i++) {
1847 int bci = table[i].start_bci;
1848 int len = table[i].length;
1849 const char* name = constants()->printable_name_at(table[i].name_cp_index);
1850 const char* desc = constants()->printable_name_at(table[i].descriptor_cp_index);
1851 int slot = table[i].slot;
1852 st->print_cr(" - %s %s bci=%d len=%d slot=%d", desc, name, bci, len, slot);
1853 }
1854 }
1855 }
1856 if (code() != NULL) {
1857 st->print (" - compiled code: ");
1858 code()->print_value_on(st);
1859 }
1860 if (is_native()) {
1861 st->print_cr(" - native function: " INTPTR_FORMAT, native_function());
1862 st->print_cr(" - signature handler: " INTPTR_FORMAT, signature_handler());
1863 }
1864 }
1865
1866 #endif //PRODUCT
1867
1868 void Method::print_value_on(outputStream* st) const {
1869 assert(is_method(), "must be method");
1870 st->print_cr("%s", internal_name());
1871 print_address_on(st);
1872 st->print(" ");
1873 name()->print_value_on(st);
1874 st->print(" ");
1875 signature()->print_value_on(st);
1876 st->print(" in ");
1877 method_holder()->print_value_on(st);
1878 if (WizardMode) st->print("[%d,%d]", size_of_parameters(), max_locals());
1879 if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code());
1880 }
1881
1882
1883 // Verification
1884
1885 void Method::verify_on(outputStream* st) {
1886 guarantee(is_method(), "object must be method");
1887 guarantee(is_metadata(), "should be metadata");
1888 guarantee(constants()->is_constantPool(), "should be constant pool");
1889 guarantee(constants()->is_metadata(), "should be metadata");
1890 guarantee(constMethod()->is_constMethod(), "should be ConstMethod*");
1891 guarantee(constMethod()->is_metadata(), "should be metadata");
1892 MethodData* md = method_data();
1893 guarantee(md == NULL ||
1894 md->is_metadata(), "should be metadata");
1895 guarantee(md == NULL ||
1896 md->is_methodData(), "should be method data");
1897 }