comparison src/share/vm/oops/methodOop.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children d8b3ef7ee3e5
1 /*
2 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 # include "incls/_precompiled.incl"
26 # include "incls/_methodOop.cpp.incl"
27
28
29 // Implementation of methodOopDesc
30
31 address methodOopDesc::get_i2c_entry() {
32 assert(_adapter != NULL, "must have");
33 return _adapter->get_i2c_entry();
34 }
35
36 address methodOopDesc::get_c2i_entry() {
37 assert(_adapter != NULL, "must have");
38 return _adapter->get_c2i_entry();
39 }
40
41 address methodOopDesc::get_c2i_unverified_entry() {
42 assert(_adapter != NULL, "must have");
43 return _adapter->get_c2i_unverified_entry();
44 }
45
46 char* methodOopDesc::name_and_sig_as_C_string() {
47 return name_and_sig_as_C_string(Klass::cast(constants()->pool_holder()), name(), signature());
48 }
49
50 char* methodOopDesc::name_and_sig_as_C_string(char* buf, int size) {
51 return name_and_sig_as_C_string(Klass::cast(constants()->pool_holder()), name(), signature(), buf, size);
52 }
53
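// Builds a human-readable "klass.method_name" + signature string, e.g. (illustrative)
// "java.lang.Object.toString()Ljava/lang/String;", in a resource-allocated buffer.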
54 char* methodOopDesc::name_and_sig_as_C_string(Klass* klass, symbolOop method_name, symbolOop signature) {
55 const char* klass_name = klass->external_name();
56 int klass_name_len = (int)strlen(klass_name);
57 int method_name_len = method_name->utf8_length();
58 int len = klass_name_len + 1 + method_name_len + signature->utf8_length();
59 char* dest = NEW_RESOURCE_ARRAY(char, len + 1);
60 strcpy(dest, klass_name);
61 dest[klass_name_len] = '.';
62 strcpy(&dest[klass_name_len + 1], method_name->as_C_string());
63 strcpy(&dest[klass_name_len + 1 + method_name_len], signature->as_C_string());
64 dest[len] = 0;
65 return dest;
66 }
67
68 char* methodOopDesc::name_and_sig_as_C_string(Klass* klass, symbolOop method_name, symbolOop signature, char* buf, int size) {
69 symbolOop klass_name = klass->name();
70 klass_name->as_klass_external_name(buf, size);
71 int len = (int)strlen(buf);
72
73 if (len < size - 1) {
74 buf[len++] = '.';
75
76 method_name->as_C_string(&(buf[len]), size - len);
77 len = (int)strlen(buf);
78
79 signature->as_C_string(&(buf[len]), size - len);
80 }
81
82 return buf;
83 }
84
85 int methodOopDesc::fast_exception_handler_bci_for(KlassHandle ex_klass, int throw_bci, TRAPS) {
86 // exception table holds quadruple entries of the form (beg_bci, end_bci, handler_bci, klass_index)
87 const int beg_bci_offset = 0;
88 const int end_bci_offset = 1;
89 const int handler_bci_offset = 2;
90 const int klass_index_offset = 3;
91 const int entry_size = 4;
92 // access exception table
93 typeArrayHandle table (THREAD, constMethod()->exception_table());
94 int length = table->length();
95 assert(length % entry_size == 0, "exception table format has changed");
96 // iterate through all entries sequentially
97 constantPoolHandle pool(THREAD, constants());
98 for (int i = 0; i < length; i += entry_size) {
99 int beg_bci = table->int_at(i + beg_bci_offset);
100 int end_bci = table->int_at(i + end_bci_offset);
101 assert(beg_bci <= end_bci, "inconsistent exception table");
102 if (beg_bci <= throw_bci && throw_bci < end_bci) {
103 // exception handler bci range covers throw_bci => investigate further
104 int handler_bci = table->int_at(i + handler_bci_offset);
105 int klass_index = table->int_at(i + klass_index_offset);
106 if (klass_index == 0) {
107 return handler_bci;
108 } else if (ex_klass.is_null()) {
109 return handler_bci;
110 } else {
111 // we know the exception class => get the constraint class
112 // this may require loading of the constraint class; if verification
113 // fails or some other exception occurs, return handler_bci
114 klassOop k = pool->klass_at(klass_index, CHECK_(handler_bci));
115 KlassHandle klass = KlassHandle(THREAD, k);
116 assert(klass.not_null(), "klass not loaded");
117 if (ex_klass->is_subtype_of(klass())) {
118 return handler_bci;
119 }
120 }
121 }
122 }
123
124 return -1;
125 }
126
127 methodOop methodOopDesc::method_from_bcp(address bcp) {
128 debug_only(static int count = 0; count++);
129 assert(Universe::heap()->is_in_permanent(bcp), "bcp not in perm_gen");
130 // TO DO: this may be unsafe in some configurations
131 HeapWord* p = Universe::heap()->block_start(bcp);
132 assert(Universe::heap()->block_is_obj(p), "must be obj");
133 assert(oop(p)->is_constMethod(), "not a method");
134 return constMethodOop(p)->method();
135 }
136
137
138 void methodOopDesc::mask_for(int bci, InterpreterOopMap* mask) {
139
140 Thread* myThread = Thread::current();
141 methodHandle h_this(myThread, this);
142 #ifdef ASSERT
143 bool has_capability = myThread->is_VM_thread() ||
144 myThread->is_ConcurrentGC_thread() ||
145 myThread->is_GC_task_thread();
146
147 if (!has_capability) {
148 if (!VerifyStack && !VerifyLastFrame) {
149 // verify stack calls this outside VM thread
150 warning("oopmap should only be accessed by the "
151 "VM, GC task or CMS threads (or during debugging)");
152 InterpreterOopMap local_mask;
153 instanceKlass::cast(method_holder())->mask_for(h_this, bci, &local_mask);
154 local_mask.print();
155 }
156 }
157 #endif
158 instanceKlass::cast(method_holder())->mask_for(h_this, bci, mask);
159 return;
160 }
161
162
163 int methodOopDesc::bci_from(address bcp) const {
164 assert(is_native() && bcp == code_base() || contains(bcp), "bcp doesn't belong to this method");
165 return bcp - code_base();
166 }
167
168
169 // Return (int)bcx if it appears to be a valid BCI.
170 // Return bci_from((address)bcx) if it appears to be a valid BCP.
171 // Return -1 otherwise.
172 // Used by profiling code, when invalid data is a possibility.
173 // The caller is responsible for validating the methodOop itself.
174 int methodOopDesc::validate_bci_from_bcx(intptr_t bcx) const {
175 // keep bci as -1 if not a valid bci
176 int bci = -1;
177 if (bcx == 0 || (address)bcx == code_base()) {
178 // code_size() may return 0 and we allow 0 here
179 // the method may be native
180 bci = 0;
181 } else if (frame::is_bci(bcx)) {
182 if (bcx < code_size()) {
183 bci = (int)bcx;
184 }
185 } else if (contains((address)bcx)) {
186 bci = (address)bcx - code_base();
187 }
188 // Assert that if we have dodged any asserts, bci is negative.
189 assert(bci == -1 || bci == bci_from(bcp_from(bci)), "sane bci if >=0");
190 return bci;
191 }
192
193 address methodOopDesc::bcp_from(int bci) const {
194 assert((is_native() && bci == 0) || (!is_native() && 0 <= bci && bci < code_size()), "illegal bci");
195 address bcp = code_base() + bci;
196 assert(is_native() && bcp == code_base() || contains(bcp), "bcp doesn't belong to this method");
197 return bcp;
198 }
199
200
201 int methodOopDesc::object_size(bool is_native) {
202 // If native, then include pointers for native_function and signature_handler
203 int extra_bytes = (is_native) ? 2*sizeof(address*) : 0;
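// For example, on a 64-bit VM this adds 2 * 8 = 16 extra bytes, i.e. 2 extra words.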
204 int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord;
205 return align_object_size(header_size() + extra_words);
206 }
207
208
209 symbolOop methodOopDesc::klass_name() const {
210 klassOop k = method_holder();
211 assert(k->is_klass(), "must be klass");
212 instanceKlass* ik = (instanceKlass*) k->klass_part();
213 return ik->name();
214 }
215
216
217 void methodOopDesc::set_interpreter_kind() {
218 int kind = Interpreter::method_kind(methodOop(this));
219 assert(kind != Interpreter::invalid,
220 "interpreter entry must be valid");
221 set_interpreter_kind(kind);
222 }
223
224
225 // Attempt to return method oop to original state. Clear any pointers
226 // (to objects outside the shared spaces). We won't be able to predict
227 // where they should point in a new JVM. Further initialize some
228 // entries now in order to allow them to be write protected later.
229
230 void methodOopDesc::remove_unshareable_info() {
231 unlink_method();
232 set_interpreter_kind();
233 }
234
235
236 bool methodOopDesc::was_executed_more_than(int n) const {
237 // Invocation counter is reset when the methodOop is compiled.
238 // If the method has compiled code we therefore assume it has
239 // been executed more than n times.
240 if (is_accessor() || is_empty_method() || (code() != NULL)) {
241 // interpreter doesn't bump invocation counter of trivial methods
242 // compiler does not bump invocation counter of compiled methods
243 return true;
244 } else if (_invocation_counter.carry()) {
245 // The carry bit is set when the counter overflows and causes
246 // a compilation to occur. We don't know how many times
247 // the counter has been reset, so we simply assume it has
248 // been executed more than n times.
249 return true;
250 } else {
251 return invocation_count() > n;
252 }
253 }
254
255 #ifndef PRODUCT
256 void methodOopDesc::print_invocation_count() const {
257 if (is_static()) tty->print("static ");
258 if (is_final()) tty->print("final ");
259 if (is_synchronized()) tty->print("synchronized ");
260 if (is_native()) tty->print("native ");
261 method_holder()->klass_part()->name()->print_symbol_on(tty);
262 tty->print(".");
263 name()->print_symbol_on(tty);
264 signature()->print_symbol_on(tty);
265
266 if (WizardMode) {
267 // dump the size of the byte codes
268 tty->print(" {%d}", code_size());
269 }
270 tty->cr();
271
272 tty->print_cr (" interpreter_invocation_count: %8d ", interpreter_invocation_count());
273 tty->print_cr (" invocation_counter: %8d ", invocation_count());
274 tty->print_cr (" backedge_counter: %8d ", backedge_count());
275 if (CountCompiledCalls) {
276 tty->print_cr (" compiled_invocation_count: %8d ", compiled_invocation_count());
277 }
278
279 }
280 #endif
281
282 // Build a methodDataOop object to hold information about this method
283 // collected in the interpreter.
284 void methodOopDesc::build_interpreter_method_data(methodHandle method, TRAPS) {
285 // Grab a lock here to prevent multiple
286 // methodDataOops from being created.
287 MutexLocker ml(MethodData_lock, THREAD);
288 if (method->method_data() == NULL) {
289 methodDataOop method_data = oopFactory::new_methodData(method, CHECK);
290 method->set_method_data(method_data);
291 if (PrintMethodData && (Verbose || WizardMode)) {
292 ResourceMark rm(THREAD);
293 tty->print("build_interpreter_method_data for ");
294 method->print_name(tty);
295 tty->cr();
296 // At the end of the run, the MDO, full of data, will be dumped.
297 }
298 }
299 }
300
301 void methodOopDesc::cleanup_inline_caches() {
302 // The current system doesn't use inline caches in the interpreter
303 // => nothing to do (keep this method around for future use)
304 }
305
306
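// The parameter size is the number of argument slots derived from the signature,
// plus one slot for the receiver on non-static methods.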
307 void methodOopDesc::compute_size_of_parameters(Thread *thread) {
308 symbolHandle h_signature(thread, signature());
309 ArgumentSizeComputer asc(h_signature);
310 set_size_of_parameters(asc.size() + (is_static() ? 0 : 1));
311 }
312
313 #ifdef CC_INTERP
314 void methodOopDesc::set_result_index(BasicType type) {
315 _result_index = Interpreter::BasicType_as_index(type);
316 }
317 #endif
318
319 BasicType methodOopDesc::result_type() const {
320 ResultTypeFinder rtf(signature());
321 return rtf.type();
322 }
323
324
325 bool methodOopDesc::is_empty_method() const {
326 return code_size() == 1
327 && *code_base() == Bytecodes::_return;
328 }
329
330
331 bool methodOopDesc::is_vanilla_constructor() const {
332 // Returns true if this method is a vanilla constructor, i.e. an "<init>" "()V" method
333 // which only calls the superclass vanilla constructor and possibly does stores of
334 // zero constants to local fields:
335 //
336 // aload_0
337 // invokespecial
338 // indexbyte1
339 // indexbyte2
340 //
341 // followed by an (optional) sequence of:
342 //
343 // aload_0
344 // aconst_null / iconst_0 / fconst_0 / dconst_0
345 // putfield
346 // indexbyte1
347 // indexbyte2
348 //
349 // followed by:
350 //
351 // return
352
353 assert(name() == vmSymbols::object_initializer_name(), "Should only be called for default constructors");
354 assert(signature() == vmSymbols::void_method_signature(), "Should only be called for default constructors");
355 int size = code_size();
356 // Check if size matches
357 if (size == 0 || size % 5 != 0) return false;
358 address cb = code_base();
359 int last = size - 1;
360 if (cb[0] != Bytecodes::_aload_0 || cb[1] != Bytecodes::_invokespecial || cb[last] != Bytecodes::_return) {
361 // Does not call superclass default constructor
362 return false;
363 }
364 // Check optional sequence
365 for (int i = 4; i < last; i += 5) {
366 if (cb[i] != Bytecodes::_aload_0) return false;
367 if (!Bytecodes::is_zero_const(Bytecodes::cast(cb[i+1]))) return false;
368 if (cb[i+2] != Bytecodes::_putfield) return false;
369 }
370 return true;
371 }
372
373
374 bool methodOopDesc::compute_has_loops_flag() {
375 BytecodeStream bcs(methodOop(this));
376 Bytecodes::Code bc;
377
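// A branch whose destination precedes the next bci is a backward branch,
// which is taken as evidence of a loop.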
378 while ((bc = bcs.next()) >= 0) {
379 switch( bc ) {
380 case Bytecodes::_ifeq:
381 case Bytecodes::_ifnull:
382 case Bytecodes::_iflt:
383 case Bytecodes::_ifle:
384 case Bytecodes::_ifne:
385 case Bytecodes::_ifnonnull:
386 case Bytecodes::_ifgt:
387 case Bytecodes::_ifge:
388 case Bytecodes::_if_icmpeq:
389 case Bytecodes::_if_icmpne:
390 case Bytecodes::_if_icmplt:
391 case Bytecodes::_if_icmpgt:
392 case Bytecodes::_if_icmple:
393 case Bytecodes::_if_icmpge:
394 case Bytecodes::_if_acmpeq:
395 case Bytecodes::_if_acmpne:
396 case Bytecodes::_goto:
397 case Bytecodes::_jsr:
398 if( bcs.dest() < bcs.next_bci() ) _access_flags.set_has_loops();
399 break;
400
401 case Bytecodes::_goto_w:
402 case Bytecodes::_jsr_w:
403 if( bcs.dest_w() < bcs.next_bci() ) _access_flags.set_has_loops();
404 break;
405 }
406 }
407 _access_flags.set_loops_flag_init();
408 return _access_flags.has_loops();
409 }
410
411
412 bool methodOopDesc::is_final_method() const {
413 // %%% Should return true for private methods also,
414 // since there is no way to override them.
415 return is_final() || Klass::cast(method_holder())->is_final();
416 }
417
418
419 bool methodOopDesc::is_strict_method() const {
420 return is_strict();
421 }
422
423
424 bool methodOopDesc::can_be_statically_bound() const {
425 if (is_final_method()) return true;
426 return vtable_index() == nonvirtual_vtable_index;
427 }
428
429
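// A field accessor is recognized purely by its bytecode shape: a 5-byte body of
//   aload_0; getfield indexbyte1 indexbyte2; areturn (or ireturn)
// taking only the receiver as a parameter.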
430 bool methodOopDesc::is_accessor() const {
431 if (code_size() != 5) return false;
432 if (size_of_parameters() != 1) return false;
433 if (Bytecodes::java_code_at(code_base()+0) != Bytecodes::_aload_0 ) return false;
434 if (Bytecodes::java_code_at(code_base()+1) != Bytecodes::_getfield) return false;
435 Bytecodes::Code ret_bc = Bytecodes::java_code_at(code_base()+4);
436 if (ret_bc != Bytecodes::_areturn &&
437     ret_bc != Bytecodes::_ireturn ) return false;
438 return true;
439 }
440
441
442 bool methodOopDesc::is_initializer() const {
443 return name() == vmSymbols::object_initializer_name() || name() == vmSymbols::class_initializer_name();
444 }
445
446
447 objArrayHandle methodOopDesc::resolved_checked_exceptions_impl(methodOop this_oop, TRAPS) {
448 int length = this_oop->checked_exceptions_length();
449 if (length == 0) { // common case
450 return objArrayHandle(THREAD, Universe::the_empty_class_klass_array());
451 } else {
452 methodHandle h_this(THREAD, this_oop);
453 objArrayOop m_oop = oopFactory::new_objArray(SystemDictionary::class_klass(), length, CHECK_(objArrayHandle()));
454 objArrayHandle mirrors (THREAD, m_oop);
455 for (int i = 0; i < length; i++) {
456 CheckedExceptionElement* table = h_this->checked_exceptions_start(); // recompute on each iteration, not gc safe
457 klassOop k = h_this->constants()->klass_at(table[i].class_cp_index, CHECK_(objArrayHandle()));
458 assert(Klass::cast(k)->is_subclass_of(SystemDictionary::throwable_klass()), "invalid exception class");
459 mirrors->obj_at_put(i, Klass::cast(k)->java_mirror());
460 }
461 return mirrors;
462 }
463 }
464
465
466 int methodOopDesc::line_number_from_bci(int bci) const {
467 if (bci == SynchronizationEntryBCI) bci = 0;
468 assert(bci == 0 || 0 <= bci && bci < code_size(), "illegal bci");
469 int best_bci = 0;
470 int best_line = -1;
471
472 if (has_linenumber_table()) {
473 // The line numbers are a short array of 2-tuples [start_pc, line_number].
474 // Not necessarily sorted and not necessarily one-to-one.
475 CompressedLineNumberReadStream stream(compressed_linenumber_table());
476 while (stream.read_pair()) {
477 if (stream.bci() == bci) {
478 // perfect match
479 return stream.line();
480 } else {
481 // update best_bci/line
482 if (stream.bci() < bci && stream.bci() >= best_bci) {
483 best_bci = stream.bci();
484 best_line = stream.line();
485 }
486 }
487 }
488 }
489 return best_line;
490 }
491
492
493 bool methodOopDesc::is_klass_loaded_by_klass_index(int klass_index) const {
494 if( _constants->tag_at(klass_index).is_unresolved_klass() ) {
495 Thread *thread = Thread::current();
496 symbolHandle klass_name(thread, _constants->klass_name_at(klass_index));
497 Handle loader(thread, instanceKlass::cast(method_holder())->class_loader());
498 Handle prot (thread, Klass::cast(method_holder())->protection_domain());
499 return SystemDictionary::find(klass_name, loader, prot, thread) != NULL;
500 } else {
501 return true;
502 }
503 }
504
505
506 bool methodOopDesc::is_klass_loaded(int refinfo_index, bool must_be_resolved) const {
507 int klass_index = _constants->klass_ref_index_at(refinfo_index);
508 if (must_be_resolved) {
509 // Make sure the klass is resolved in the constant pool.
510 if (constants()->tag_at(klass_index).is_unresolved_klass()) return false;
511 }
512 return is_klass_loaded_by_klass_index(klass_index);
513 }
514
515
516 void methodOopDesc::set_native_function(address function, bool post_event_flag) {
517 assert(function != NULL, "use clear_native_function to unregister natives");
518 address* native_function = native_function_addr();
519
520 // We can see racers trying to place the same native function into place. Once
521 // is plenty.
522 address current = *native_function;
523 if (current == function) return;
524 if (post_event_flag && JvmtiExport::should_post_native_method_bind() &&
525 function != NULL) {
526 // native_method_throw_unsatisfied_link_error_entry() should only
527 // be passed when post_event_flag is false.
528 assert(function !=
529 SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
530 "post_event_flag mis-match");
531
532 // post the bind event, and possibly change the bind function
533 JvmtiExport::post_native_method_bind(this, &function);
534 }
535 *native_function = function;
536 // This function can be called more than once. We must make sure that we always
537 // use the latest registered method -> check if a stub already has been generated.
538 // If so, we have to make it not_entrant.
539 nmethod* nm = code(); // Put it into local variable to guard against concurrent updates
540 if (nm != NULL) {
541 nm->make_not_entrant();
542 }
543 }
544
545
546 bool methodOopDesc::has_native_function() const {
547 address func = native_function();
548 return (func != NULL && func != SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
549 }
550
551
552 void methodOopDesc::clear_native_function() {
553 set_native_function(
554 SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
555 !native_bind_event_is_interesting);
556 clear_code();
557 }
558
559
560 void methodOopDesc::set_signature_handler(address handler) {
561 address* signature_handler = signature_handler_addr();
562 *signature_handler = handler;
563 }
564
565
566 bool methodOopDesc::is_not_compilable(int comp_level) const {
567 methodDataOop mdo = method_data();
568 if (mdo != NULL
569 && (uint)mdo->decompile_count() > (uint)PerMethodRecompilationCutoff) {
570 // Since (uint)-1 is large, -1 really means 'no cutoff'.
571 return true;
572 }
573 #ifdef COMPILER2
574 if (is_tier1_compile(comp_level)) {
575 if (is_not_tier1_compilable()) {
576 return true;
577 }
578 }
579 #endif // COMPILER2
580 return (_invocation_counter.state() == InvocationCounter::wait_for_nothing)
581 || (number_of_breakpoints() > 0);
582 }
583
584 // Call this when the compiler finds that this method is not compilable
585 void methodOopDesc::set_not_compilable(int comp_level) {
586 if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
587 ttyLocker ttyl;
588 xtty->begin_elem("make_not_compilable thread='%d'", (int) os::current_thread_id());
589 xtty->method(methodOop(this));
590 xtty->stamp();
591 xtty->end_elem();
592 }
593 #ifdef COMPILER2
594 if (is_tier1_compile(comp_level)) {
595 set_not_tier1_compilable();
596 return;
597 }
598 #endif /* COMPILER2 */
599 assert(comp_level == CompLevel_highest_tier, "unexpected compilation level");
600 invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
601 backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
602 }
603
604 // Revert to using the interpreter and clear out the nmethod
605 void methodOopDesc::clear_code() {
606
607 // _adapter may be NULL if c2i adapters have not been made yet.
608 // This should only happen at allocation time.
609 if (_adapter == NULL) {
610 _from_compiled_entry = NULL;
611 } else {
612 _from_compiled_entry = _adapter->get_c2i_entry();
613 }
614 OrderAccess::storestore();
615 _from_interpreted_entry = _i2i_entry;
616 OrderAccess::storestore();
617 _code = NULL;
618 }
619
620 // Called by class data sharing to remove any entry points (which are not shared)
621 void methodOopDesc::unlink_method() {
622 _code = NULL;
623 _i2i_entry = NULL;
624 _from_interpreted_entry = NULL;
625 if (is_native()) {
626 *native_function_addr() = NULL;
627 set_signature_handler(NULL);
628 }
629 NOT_PRODUCT(set_compiled_invocation_count(0);)
630 invocation_counter()->reset();
631 backedge_counter()->reset();
632 _adapter = NULL;
633 _from_compiled_entry = NULL;
634 assert(_method_data == NULL, "unexpected method data?");
635 set_method_data(NULL);
636 set_interpreter_throwout_count(0);
637 set_interpreter_invocation_count(0);
638 _highest_tier_compile = CompLevel_none;
639 }
640
641 // Called when the method_holder is getting linked. Set up entry points so the method
642 // is ready to be called from interpreter, compiler, and vtables.
643 void methodOopDesc::link_method(methodHandle h_method, TRAPS) {
644 assert(_i2i_entry == NULL, "should only be called once");
645 assert(_adapter == NULL, "init'd to NULL" );
646 assert( _code == NULL, "nothing compiled yet" );
647
648 // Setup interpreter entrypoint
649 assert(this == h_method(), "wrong h_method()" );
650 address entry = Interpreter::entry_for_method(h_method);
651 assert(entry != NULL, "interpreter entry must be non-null");
652 // Sets both _i2i_entry and _from_interpreted_entry
653 set_interpreter_entry(entry);
654 if (is_native()) {
655 set_native_function(
656 SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
657 !native_bind_event_is_interesting);
658 }
659
660 // Setup compiler entrypoint. This is made eagerly, so we do not need
661 // special handling of vtables. An alternative is to create adapters
662 // lazily by calling make_adapter() from from_compiled_entry() for the
663 // normal calls. For vtable calls life gets more complicated. When a
664 // call-site goes mega-morphic we need adapters in all methods which can be
665 // called from the vtable. We need adapters on such methods that get loaded
666 // later. Ditto for mega-morphic itable calls. If this proves to be a
667 // problem we'll make these lazily later.
668 (void) make_adapters(h_method, CHECK);
669
670 // ONLY USE the h_method now as make_adapter may have blocked
671
672 }
673
674 address methodOopDesc::make_adapters(methodHandle mh, TRAPS) {
675 // If running -Xint we need no adapters.
676 if (Arguments::mode() == Arguments::_int) return NULL;
677
678 // Adapters for compiled code are made eagerly here. They are fairly
679 // small (generally < 100 bytes) and quick to make (and cached and shared)
680 // so making them eagerly shouldn't be too expensive.
681 AdapterHandlerEntry* adapter = AdapterHandlerLibrary::get_adapter(mh);
682 if (adapter == NULL ) {
683 THROW_0(vmSymbols::java_lang_OutOfMemoryError());
684 }
685
686 mh->set_adapter_entry(adapter);
687 mh->_from_compiled_entry = adapter->get_c2i_entry();
688 return adapter->get_c2i_entry();
689 }
690
691 // The verified_code_entry() must be called when an invoke is resolved
692 // on this method.
693
694 // It returns the compiled code entry point, after asserting not null.
695 // This function is called after potential safepoints so that the nmethod
696 // or adapter it points to is still live and valid.
697 // This function must not hit a safepoint!
698 address methodOopDesc::verified_code_entry() {
699 debug_only(No_Safepoint_Verifier nsv;)
700 assert(_from_compiled_entry != NULL, "must be set");
701 return _from_compiled_entry;
702 }
703
704 // Check that if an nmethod ref exists, it has a backlink to this or no backlink at all
705 // (could be racing a deopt).
706 // Not inline to avoid circular ref.
707 bool methodOopDesc::check_code() const {
708 // The _code field may be cached in a register or local. There's a race on the value of the field.
709 nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
710 return code == NULL || (code->method() == NULL) || (code->method() == (methodOop)this && !code->is_osr_method());
711 }
712
713 // Install compiled code. Instantly it can execute.
714 void methodOopDesc::set_code(methodHandle mh, nmethod *code) {
715 assert( code, "use clear_code to remove code" );
716 assert( mh->check_code(), "" );
717
718 guarantee(mh->adapter() != NULL, "Adapter blob must already exist!");
719
720 // These writes must happen in this order, because the interpreter will
721 // directly jump to from_interpreted_entry which jumps to an i2c adapter
722 // which jumps to _from_compiled_entry.
723 mh->_code = code; // Assign before allowing compiled code to exec
724
725 int comp_level = code->comp_level();
726 // In theory there could be a race here. In practice it is unlikely
727 // and not worth worrying about.
728 if (comp_level > highest_tier_compile()) {
729 set_highest_tier_compile(comp_level);
730 }
731
732 OrderAccess::storestore();
733 mh->_from_compiled_entry = code->verified_entry_point();
734 OrderAccess::storestore();
735 // Instantly compiled code can execute.
736 mh->_from_interpreted_entry = mh->get_i2c_entry();
737
738 }
739
740
741 bool methodOopDesc::is_overridden_in(klassOop k) const {
742 instanceKlass* ik = instanceKlass::cast(k);
743
744 if (ik->is_interface()) return false;
745
746 // If the method's holder is an interface, we skip it - except if it
747 // is a miranda method
748 if (instanceKlass::cast(method_holder())->is_interface()) {
749 // Check that method is not a miranda method
750 if (ik->lookup_method(name(), signature()) == NULL) {
751 // No implementation exists - so it is a miranda method
752 return false;
753 }
754 return true;
755 }
756
757 assert(ik->is_subclass_of(method_holder()), "should be subklass");
758 assert(ik->vtable() != NULL, "vtable should exist");
759 if (vtable_index() == nonvirtual_vtable_index) {
760 return false;
761 } else {
762 methodOop vt_m = ik->method_at_vtable(vtable_index());
763 return vt_m != methodOop(this);
764 }
765 }
766
767
768 methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
769 u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) {
770 // Code below does not work for native methods - they should never get rewritten anyway
771 assert(!m->is_native(), "cannot rewrite native methods");
772 // Allocate new methodOop
773 AccessFlags flags = m->access_flags();
774 int checked_exceptions_len = m->checked_exceptions_length();
775 int localvariable_len = m->localvariable_table_length();
776 methodOop newm_oop = oopFactory::new_method(new_code_length, flags, new_compressed_linenumber_size, localvariable_len, checked_exceptions_len, CHECK_(methodHandle()));
777 methodHandle newm (THREAD, newm_oop);
778 int new_method_size = newm->method_size();
779 // Create a shallow copy of methodOopDesc part, but be careful to preserve the new constMethodOop
780 constMethodOop newcm = newm->constMethod();
781 int new_const_method_size = newm->constMethod()->object_size();
782 memcpy(newm(), m(), sizeof(methodOopDesc));
783 // Create shallow copy of constMethodOopDesc, but be careful to preserve the methodOop
784 memcpy(newcm, m->constMethod(), sizeof(constMethodOopDesc));
785 // Reset correct method/const method, method size, and parameter info
786 newcm->set_method(newm());
787 newm->set_constMethod(newcm);
788 assert(newcm->method() == newm(), "check");
789 newm->constMethod()->set_code_size(new_code_length);
790 newm->constMethod()->set_constMethod_size(new_const_method_size);
791 newm->set_method_size(new_method_size);
792 assert(newm->code_size() == new_code_length, "check");
793 assert(newm->checked_exceptions_length() == checked_exceptions_len, "check");
794 assert(newm->localvariable_table_length() == localvariable_len, "check");
795 // Copy new byte codes
796 memcpy(newm->code_base(), new_code, new_code_length);
797 // Copy line number table
798 if (new_compressed_linenumber_size > 0) {
799 memcpy(newm->compressed_linenumber_table(),
800 new_compressed_linenumber_table,
801 new_compressed_linenumber_size);
802 }
803 // Copy checked_exceptions
804 if (checked_exceptions_len > 0) {
805 memcpy(newm->checked_exceptions_start(),
806 m->checked_exceptions_start(),
807 checked_exceptions_len * sizeof(CheckedExceptionElement));
808 }
809 // Copy local variable number table
810 if (localvariable_len > 0) {
811 memcpy(newm->localvariable_table_start(),
812 m->localvariable_table_start(),
813 localvariable_len * sizeof(LocalVariableTableElement));
814 }
815 return newm;
816 }
817
818 vmIntrinsics::ID methodOopDesc::compute_intrinsic_id() const {
819 assert(vmIntrinsics::_none == 0, "correct coding of default case");
820 const uintptr_t max_cache_uint = right_n_bits((int)(sizeof(_intrinsic_id_cache) * BitsPerByte));
821 assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_cache_uint, "else fix cache size");
822 // if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
823 // because we are not loading from core libraries
824 if (instanceKlass::cast(method_holder())->class_loader() != NULL) return vmIntrinsics::_none;
825
826 // see if the klass name is well-known:
827 symbolOop klass_name = instanceKlass::cast(method_holder())->name();
828 vmSymbols::SID klass_id = vmSymbols::find_sid(klass_name);
829 if (klass_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
830
831 // ditto for method and signature:
832 vmSymbols::SID name_id = vmSymbols::find_sid(name());
833 if (name_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
834 vmSymbols::SID sig_id = vmSymbols::find_sid(signature());
835 if (sig_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
836 jshort flags = access_flags().as_short();
837
838 // A few slightly irregular cases:
839 switch (klass_id) {
840 case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_StrictMath):
841 // Second chance: check in regular Math.
842 switch (name_id) {
843 case vmSymbols::VM_SYMBOL_ENUM_NAME(min_name):
844 case vmSymbols::VM_SYMBOL_ENUM_NAME(max_name):
845 case vmSymbols::VM_SYMBOL_ENUM_NAME(sqrt_name):
846 // pretend it is the corresponding method in the non-strict class:
847 klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_Math);
848 break;
849 }
850 }
851
852 // return intrinsic id if any
853 return vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
854 }
855
856
857 // These two methods are static since a GC may move the methodOopDesc
858 bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) {
859 bool sig_is_loaded = true;
860 Handle class_loader(THREAD, instanceKlass::cast(m->method_holder())->class_loader());
861 Handle protection_domain(THREAD, Klass::cast(m->method_holder())->protection_domain());
862 symbolHandle signature(THREAD, m->signature());
863 for(SignatureStream ss(signature); !ss.is_done(); ss.next()) {
864 if (ss.is_object()) {
865 symbolOop sym = ss.as_symbol(CHECK_(false));
866 symbolHandle name (THREAD, sym);
867 klassOop klass = SystemDictionary::resolve_or_null(name, class_loader,
868 protection_domain, THREAD);
869 // We are loading classes eagerly. If a ClassNotFoundException was generated,
870 // be sure to ignore it.
871 if (HAS_PENDING_EXCEPTION) {
872 if (PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass())) {
873 CLEAR_PENDING_EXCEPTION;
874 } else {
875 return false;
876 }
877 }
878 if( klass == NULL) { sig_is_loaded = false; }
879 }
880 }
881 return sig_is_loaded;
882 }
883
884 bool methodOopDesc::has_unloaded_classes_in_signature(methodHandle m, TRAPS) {
885 Handle class_loader(THREAD, instanceKlass::cast(m->method_holder())->class_loader());
886 Handle protection_domain(THREAD, Klass::cast(m->method_holder())->protection_domain());
887 symbolHandle signature(THREAD, m->signature());
888 for(SignatureStream ss(signature); !ss.is_done(); ss.next()) {
889 if (ss.type() == T_OBJECT) {
890 symbolHandle name(THREAD, ss.as_symbol_or_null());
891 if (name() == NULL) return true;
892 klassOop klass = SystemDictionary::find(name, class_loader, protection_domain, THREAD);
893 if (klass == NULL) return true;
894 }
895 }
896 return false;
897 }
898
899 // Exposed so field engineers can debug VM
900 void methodOopDesc::print_short_name(outputStream* st) {
901 ResourceMark rm;
902 #ifdef PRODUCT
903 st->print(" %s::", method_holder()->klass_part()->external_name());
904 #else
905 st->print(" %s::", method_holder()->klass_part()->internal_name());
906 #endif
907 name()->print_symbol_on(st);
908 if (WizardMode) signature()->print_symbol_on(st);
909 }
910
911
912 extern "C" {
913 static int method_compare(methodOop* a, methodOop* b) {
914 return (*a)->name()->fast_compare((*b)->name());
915 }
916
917 // Prevent qsort from reordering a previous valid sort by
918 // considering the address of the methodOops if two methods
919 // would otherwise compare as equal. Required to preserve
920 // optimal access order in the shared archive. Slower than
921 // method_compare, only used for shared archive creation.
922 static int method_compare_idempotent(methodOop* a, methodOop* b) {
923 int i = method_compare(a, b);
924 if (i != 0) return i;
925 return ( a < b ? -1 : (a == b ? 0 : 1));
926 }
927
928 typedef int (*compareFn)(const void*, const void*);
929 }
930
931
932 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
933 static void reorder_based_on_method_index(objArrayOop methods,
934 objArrayOop annotations,
935 oop* temp_array) {
936 if (annotations == NULL) {
937 return;
938 }
939
940 int length = methods->length();
941 int i;
942 // Copy to temp array
943 memcpy(temp_array, annotations->obj_at_addr(0), length * sizeof(oop));
944
945 // Copy back using old method indices
946 for (i = 0; i < length; i++) {
947 methodOop m = (methodOop) methods->obj_at(i);
948 annotations->obj_at_put(i, temp_array[m->method_idnum()]);
949 }
950 }
951
952
953 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
954 void methodOopDesc::sort_methods(objArrayOop methods,
955 objArrayOop methods_annotations,
956 objArrayOop methods_parameter_annotations,
957 objArrayOop methods_default_annotations,
958 bool idempotent) {
959 int length = methods->length();
960 if (length > 1) {
961 bool do_annotations = false;
962 if (methods_annotations != NULL ||
963 methods_parameter_annotations != NULL ||
964 methods_default_annotations != NULL) {
965 do_annotations = true;
966 }
967 if (do_annotations) {
968 // Remember current method ordering so we can reorder annotations
969 for (int i = 0; i < length; i++) {
970 methodOop m = (methodOop) methods->obj_at(i);
971 m->set_method_idnum(i);
972 }
973 }
974
975 // Use a simple bubble sort for a small number of methods since
976 // qsort requires a function pointer call for each comparison.
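// Note: name symbols are canonicalized in the symbol table, so the address comparison
// used by the bubble sort below gives a consistent, if arbitrary, ordering.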
977 if (length < 8) {
978 bool sorted = true;
979 for (int i=length-1; i>0; i--) {
980 for (int j=0; j<i; j++) {
981 methodOop m1 = (methodOop)methods->obj_at(j);
982 methodOop m2 = (methodOop)methods->obj_at(j+1);
983 if ((uintptr_t)m1->name() > (uintptr_t)m2->name()) {
984 methods->obj_at_put(j, m2);
985 methods->obj_at_put(j+1, m1);
986 sorted = false;
987 }
988 }
989 if (sorted) break;
990 sorted = true;
991 }
992 } else {
993 compareFn compare = (compareFn) (idempotent ? method_compare_idempotent : method_compare);
994 qsort(methods->obj_at_addr(0), length, oopSize, compare);
995 }
996
997 // Sort annotations if necessary
998 assert(methods_annotations == NULL || methods_annotations->length() == methods->length(), "");
999 assert(methods_parameter_annotations == NULL || methods_parameter_annotations->length() == methods->length(), "");
1000 assert(methods_default_annotations == NULL || methods_default_annotations->length() == methods->length(), "");
1001 if (do_annotations) {
1002 // Allocate temporary storage
1003 oop* temp_array = NEW_RESOURCE_ARRAY(oop, length);
1004 reorder_based_on_method_index(methods, methods_annotations, temp_array);
1005 reorder_based_on_method_index(methods, methods_parameter_annotations, temp_array);
1006 reorder_based_on_method_index(methods, methods_default_annotations, temp_array);
1007 }
1008
1009 // Reset method ordering
1010 for (int i = 0; i < length; i++) {
1011 methodOop m = (methodOop) methods->obj_at(i);
1012 m->set_method_idnum(i);
1013 }
1014 }
1015 }
1016
1017
1018 //-----------------------------------------------------------------------------------
1019 // Non-product code
1020
1021 #ifndef PRODUCT
1022 class SignatureTypePrinter : public SignatureTypeNames {
1023 private:
1024 outputStream* _st;
1025 bool _use_separator;
1026
1027 void type_name(const char* name) {
1028 if (_use_separator) _st->print(", ");
1029 _st->print(name);
1030 _use_separator = true;
1031 }
1032
1033 public:
1034 SignatureTypePrinter(symbolHandle signature, outputStream* st) : SignatureTypeNames(signature) {
1035 _st = st;
1036 _use_separator = false;
1037 }
1038
1039 void print_parameters() { _use_separator = false; iterate_parameters(); }
1040 void print_returntype() { _use_separator = false; iterate_returntype(); }
1041 };
1042
1043
1044 void methodOopDesc::print_name(outputStream* st) {
1045 Thread *thread = Thread::current();
1046 ResourceMark rm(thread);
1047 SignatureTypePrinter sig(signature(), st);
1048 st->print("%s ", is_static() ? "static" : "virtual");
1049 sig.print_returntype();
1050 st->print(" %s.", method_holder()->klass_part()->internal_name());
1051 name()->print_symbol_on(st);
1052 st->print("(");
1053 sig.print_parameters();
1054 st->print(")");
1055 }
1056
1057
1058 void methodOopDesc::print_codes_on(outputStream* st) const {
1059 print_codes_on(0, code_size(), st);
1060 }
1061
1062 void methodOopDesc::print_codes_on(int from, int to, outputStream* st) const {
1063 Thread *thread = Thread::current();
1064 ResourceMark rm(thread);
1065 methodHandle mh (thread, (methodOop)this);
1066 BytecodeStream s(mh);
1067 s.set_interval(from, to);
1068 BytecodeTracer::set_closure(BytecodeTracer::std_closure());
1069 while (s.next() >= 0) BytecodeTracer::trace(mh, s.bcp(), st);
1070 }
1071 #endif // not PRODUCT
1072
1073
1074 // Simple compression of line number tables. We use a regular compressed stream, except that we compress deltas
1075 // between (bci,line) pairs since they are smaller. If (bci delta, line delta) fits in (5-bit unsigned, 3-bit unsigned)
1076 // we save it as one byte, otherwise we write a 0xFF escape character and use regular compression. 0x0 is used
1077 // as end-of-stream terminator.
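// Illustration: a pair with bci delta 6 and line delta 2 fits the compact form and is
// encoded as the single byte (6 << 3) | 2 == 0x32; read_pair() below recovers the deltas
// with (next >> 3) and (next & 0x7).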
1078
1079 void CompressedLineNumberWriteStream::write_pair_regular(int bci_delta, int line_delta) {
1080 // The bci and line number deltas do not compress into a single byte.
1081 // Write out escape character and use regular compression for bci and line number.
1082 write_byte((jubyte)0xFF);
1083 write_signed_int(bci_delta);
1084 write_signed_int(line_delta);
1085 }
1086
1087 // See comment in methodOop.hpp which explains why this exists.
1088 #if defined(_M_AMD64) && _MSC_VER >= 1400
1089 #pragma optimize("", off)
1090 void CompressedLineNumberWriteStream::write_pair(int bci, int line) {
1091 write_pair_inline(bci, line);
1092 }
1093 #pragma optimize("", on)
1094 #endif
1095
1096 CompressedLineNumberReadStream::CompressedLineNumberReadStream(u_char* buffer) : CompressedReadStream(buffer) {
1097 _bci = 0;
1098 _line = 0;
1099 }
1100
1101
1102 bool CompressedLineNumberReadStream::read_pair() {
1103 jubyte next = read_byte();
1104 // Check for terminator
1105 if (next == 0) return false;
1106 if (next == 0xFF) {
1107 // Escape character, regular compression used
1108 _bci += read_signed_int();
1109 _line += read_signed_int();
1110 } else {
1111 // Single byte compression used
1112 _bci += next >> 3;
1113 _line += next & 0x7;
1114 }
1115 return true;
1116 }
1117
1118
1119 Bytecodes::Code methodOopDesc::orig_bytecode_at(int bci) {
1120 BreakpointInfo* bp = instanceKlass::cast(method_holder())->breakpoints();
1121 for (; bp != NULL; bp = bp->next()) {
1122 if (bp->match(this, bci)) {
1123 return bp->orig_bytecode();
1124 }
1125 }
1126 ShouldNotReachHere();
1127 return Bytecodes::_shouldnotreachhere;
1128 }
1129
1130 void methodOopDesc::set_orig_bytecode_at(int bci, Bytecodes::Code code) {
1131 assert(code != Bytecodes::_breakpoint, "cannot patch breakpoints this way");
1132 BreakpointInfo* bp = instanceKlass::cast(method_holder())->breakpoints();
1133 for (; bp != NULL; bp = bp->next()) {
1134 if (bp->match(this, bci)) {
1135 bp->set_orig_bytecode(code);
1136 // and continue, in case there is more than one
1137 }
1138 }
1139 }
1140
1141 void methodOopDesc::set_breakpoint(int bci) {
1142 instanceKlass* ik = instanceKlass::cast(method_holder());
1143 BreakpointInfo *bp = new BreakpointInfo(this, bci);
1144 bp->set_next(ik->breakpoints());
1145 ik->set_breakpoints(bp);
1146 // do this last:
1147 bp->set(this);
1148 }
1149
1150 static void clear_matches(methodOop m, int bci) {
1151 instanceKlass* ik = instanceKlass::cast(m->method_holder());
1152 BreakpointInfo* prev_bp = NULL;
1153 BreakpointInfo* next_bp;
1154 for (BreakpointInfo* bp = ik->breakpoints(); bp != NULL; bp = next_bp) {
1155 next_bp = bp->next();
1156 // A bci value of -1 is used to delete all breakpoints in method m (see clear_all_breakpoints).
1157 if (bci >= 0 ? bp->match(m, bci) : bp->match(m)) {
1158 // do this first:
1159 bp->clear(m);
1160 // unhook it
1161 if (prev_bp != NULL)
1162 prev_bp->set_next(next_bp);
1163 else
1164 ik->set_breakpoints(next_bp);
1165 delete bp;
1166 // When a class is redefined, JVMTI sets a breakpoint in all versions of EMCP methods
1167 // at the same location, so there can be multiple matching (method_index and bci)
1168 // BreakpointInfo nodes in the BreakpointInfo list. For a clear_breakpoint request we
1169 // should delete just one breakpoint and keep the BreakpointInfo of all other method
1170 // versions for future clear_breakpoint requests.
1171 // A bci value of -1 is used to clear all breakpoints (see clear_all_breakpoints),
1172 // which is called when the class is unloaded. In that case we delete the breakpoint
1173 // information for all versions of the method. We may not correctly restore the original
1174 // bytecode in all method versions, but that is OK because the class is being unloaded,
1175 // so these methods won't be used anymore.
1176 if (bci >= 0) {
1177 break;
1178 }
1179 } else {
1180 // This one is a keeper.
1181 prev_bp = bp;
1182 }
1183 }
1184 }
1185
1186 void methodOopDesc::clear_breakpoint(int bci) {
1187 assert(bci >= 0, "");
1188 clear_matches(this, bci);
1189 }
1190
1191 void methodOopDesc::clear_all_breakpoints() {
1192 clear_matches(this, -1);
1193 }
1194
1195
1196 BreakpointInfo::BreakpointInfo(methodOop m, int bci) {
1197 _bci = bci;
1198 _name_index = m->name_index();
1199 _signature_index = m->signature_index();
1200 _orig_bytecode = (Bytecodes::Code) *m->bcp_from(_bci);
1201 if (_orig_bytecode == Bytecodes::_breakpoint)
1202 _orig_bytecode = m->orig_bytecode_at(_bci);
1203 _next = NULL;
1204 }
1205
1206 void BreakpointInfo::set(methodOop method) {
1207 #ifdef ASSERT
1208 {
1209 Bytecodes::Code code = (Bytecodes::Code) *method->bcp_from(_bci);
1210 if (code == Bytecodes::_breakpoint)
1211 code = method->orig_bytecode_at(_bci);
1212 assert(orig_bytecode() == code, "original bytecode must be the same");
1213 }
1214 #endif
1215 *method->bcp_from(_bci) = Bytecodes::_breakpoint;
1216 method->incr_number_of_breakpoints();
1217 SystemDictionary::notice_modification();
1218 {
1219 // Deoptimize all dependents on this method
1220 Thread *thread = Thread::current();
1221 HandleMark hm(thread);
1222 methodHandle mh(thread, method);
1223 Universe::flush_dependents_on_method(mh);
1224 }
1225 }
1226
1227 void BreakpointInfo::clear(methodOop method) {
1228 *method->bcp_from(_bci) = orig_bytecode();
1229 assert(method->number_of_breakpoints() > 0, "must not go negative");
1230 method->decr_number_of_breakpoints();
1231 }