src/share/vm/interpreter/interpreterRuntime.cpp @ 0:a61af66fc99e (jdk7-b24)

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children e5b0439ef4ae
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_interpreterRuntime.cpp.incl"

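// RAII helper: saves the thread's do_not_unlock_if_synchronized flag,
// clears it for the duration of the scope, and restores the saved value
// when the scope exits (including on exception paths).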
class UnlockFlagSaver {
 private:
  JavaThread* _thread;
  bool _do_not_unlock;
 public:
  UnlockFlagSaver(JavaThread* t) {
    _thread = t;
    _do_not_unlock = t->do_not_unlock_if_synchronized();
    t->set_do_not_unlock_if_synchronized(false);
  }
  ~UnlockFlagSaver() {
    _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
  }
};

//------------------------------------------------------------------------------------------------------------------------
// State accessors

void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread *thread) {
  last_frame(thread).interpreter_frame_set_bcp(bcp);
  if (ProfileInterpreter) {
    // ProfileTraps uses MDOs independently of ProfileInterpreter.
    // That is why we must check both ProfileInterpreter and mdo != NULL.
    methodDataOop mdo = last_frame(thread).interpreter_frame_method()->method_data();
    if (mdo != NULL) {
      NEEDS_CLEANUP;
      last_frame(thread).interpreter_frame_set_mdp(mdo->bci_to_dp(last_frame(thread).interpreter_frame_bci()));
    }
  }
}

//------------------------------------------------------------------------------------------------------------------------
// Constants


IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
  // access constant pool
  constantPoolOop pool = method(thread)->constants();
  int index = wide ? two_byte_index(thread) : one_byte_index(thread);
  constantTag tag = pool->tag_at(index);

  if (tag.is_unresolved_klass() || tag.is_klass()) {
    klassOop klass = pool->klass_at(index, CHECK);
    oop java_class = klass->klass_part()->java_mirror();
    thread->set_vm_result(java_class);
  } else {
#ifdef ASSERT
    // If we entered this runtime routine, we believed the tag contained
    // an unresolved string, an unresolved class or a resolved class.
    // However, another thread could have resolved the unresolved string
    // or class by the time we get here.
    assert(tag.is_unresolved_string() || tag.is_string(), "expected string");
#endif
    oop s_oop = pool->string_at(index, CHECK);
    thread->set_vm_result(s_oop);
  }
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Allocation

IRT_ENTRY(void, InterpreterRuntime::_new(JavaThread* thread, constantPoolOopDesc* pool, int index))
  klassOop k_oop = pool->klass_at(index, CHECK);
  instanceKlassHandle klass (THREAD, k_oop);

  // Make sure we are not instantiating an abstract klass
  klass->check_valid_for_instantiation(true, CHECK);

  // Make sure klass is initialized
  klass->initialize(CHECK);

  // At this point the class may not be fully initialized
  // because of recursive initialization. If it is fully
  // initialized & has_finalizer is not set, we rewrite
  // it into its fast version (Note: no locking is needed
  // here since this is an atomic byte write and can be
  // done more than once).
  //
  // Note: In case of classes with has_finalizer we don't
  //       rewrite since that saves us an extra check in
  //       the fast version which then would call the
  //       slow version anyway (and do a call back into
  //       Java).
  //       If we have a breakpoint, then we don't rewrite
  //       because the _breakpoint bytecode would be lost.
  oop obj = klass->allocate_instance(CHECK);
  thread->set_vm_result(obj);
IRT_END


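// Allocate a primitive (type) array of the given element type and length.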
IRT_ENTRY(void, InterpreterRuntime::newarray(JavaThread* thread, BasicType type, jint size))
  oop obj = oopFactory::new_typeArray(type, size, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* thread, constantPoolOopDesc* pool, int index, jint size))
  // Note: no oopHandle for pool & klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  klassOop klass = pool->klass_at(index, CHECK);
  objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
  // We may want to pass in more arguments - could make this slightly faster
  constantPoolOop constants = method(thread)->constants();
  int i = two_byte_index(thread);
  klassOop klass = constants->klass_at(i, CHECK);
  int nof_dims = number_of_dimensions(thread);
  assert(oop(klass)->is_klass(), "not a class");
  assert(nof_dims >= 1, "multianewarray rank must be nonzero");

  // We must create an array of jints to pass to multi_allocate.
  ResourceMark rm(thread);
  const int small_dims = 10;
  jint dim_array[small_dims];
  jint *dims = &dim_array[0];
  if (nof_dims > small_dims) {
    dims = (jint*) NEW_RESOURCE_ARRAY(jint, nof_dims);
  }
  for (int index = 0; index < nof_dims; index++) {
    // offset from first_size_address is addressed as local[index]
    int n = Interpreter::local_offset_in_bytes(index)/jintSize;
    dims[index] = first_size_address[n];
  }
  oop obj = arrayKlass::cast(klass)->multi_allocate(nof_dims, dims, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(obj->is_oop(), "must be a valid oop");
  assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
  instanceKlass::register_finalizer(instanceOop(obj), CHECK);
IRT_END


// Quicken instance-of and check-cast bytecodes
IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
  // Force resolving; quicken the bytecode
  int which = two_byte_index(thread);
  constantPoolOop cpool = method(thread)->constants();
  // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
  // program we might have seen an unquickened bytecode in the interpreter but have another
  // thread quicken the bytecode before we get here.
  // assert( cpool->tag_at(which).is_unresolved_klass(), "should only come here to quicken bytecodes" );
  klassOop klass = cpool->klass_at(which, CHECK);
  thread->set_vm_result(klass);
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Exceptions

// Assume the compiler is (or will be) interested in this event.
// If necessary, create an MDO to hold the information, and record it.
void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
  assert(ProfileTraps, "call me only if profiling");
  methodHandle trap_method(thread, method(thread));
  if (trap_method.not_null()) {
    methodDataHandle trap_mdo(thread, trap_method->method_data());
    if (trap_mdo.is_null()) {
      methodOopDesc::build_interpreter_method_data(trap_method, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
        CLEAR_PENDING_EXCEPTION;
      }
      trap_mdo = methodDataHandle(thread, trap_method->method_data());
      // and fall through...
    }
    if (trap_mdo.not_null()) {
      // Update per-method count of trap events.  The interpreter
      // is updating the MDO to simulate the effect of compiler traps.
      int trap_bci = trap_method->bci_from(bcp(thread));
      Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason);
    }
  }
}

static Handle get_preinitialized_exception(klassOop k, TRAPS) {
  // get klass
  instanceKlass* klass = instanceKlass::cast(k);
  assert(klass->is_initialized(),
         "this klass should have been initialized during VM initialization");
  // create instance - do not call constructor since we may have no
  // (java) stack space left (should assert constructor is empty)
  Handle exception;
  oop exception_oop = klass->allocate_instance(CHECK_(exception));
  exception = Handle(THREAD, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  return exception;
}

// Special handling for stack overflow: since we don't have any (java) stack
// space left we use the pre-allocated & pre-initialized StackOverflowError
// klass to create a stack overflow error instance.  We do not call its
// constructor for the same reason (it is empty, anyway).
IRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* thread))
  Handle exception = get_preinitialized_exception(
                                 SystemDictionary::StackOverflowError_klass(),
                                 CHECK);
  THROW_HANDLE(exception);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* thread, char* name, char* message))
  // lookup exception klass
  symbolHandle s = oopFactory::new_symbol_handle(name, CHECK);
  if (ProfileTraps) {
    if (s == vmSymbols::java_lang_ArithmeticException()) {
      note_trap(thread, Deoptimization::Reason_div0_check, CHECK);
    } else if (s == vmSymbols::java_lang_NullPointerException()) {
      note_trap(thread, Deoptimization::Reason_null_check, CHECK);
    }
  }
  // create exception
  Handle exception = Exceptions::new_exception(thread, s(), message);
  thread->set_vm_result(exception());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::create_klass_exception(JavaThread* thread, char* name, oopDesc* obj))
  ResourceMark rm(thread);
  const char* klass_name = Klass::cast(obj->klass())->external_name();
  // lookup exception klass
  symbolHandle s = oopFactory::new_symbol_handle(name, CHECK);
  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_class_check, CHECK);
  }
  // create exception, with klass name as detail message
  Handle exception = Exceptions::new_exception(thread, s(), klass_name);
  thread->set_vm_result(exception());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index))
  char message[jintAsStringSize];
  // lookup exception klass
  symbolHandle s = oopFactory::new_symbol_handle(name, CHECK);
  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_range_check, CHECK);
  }
  // create exception
  sprintf(message, "%d", index);
  THROW_MSG(s(), message);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::throw_ClassCastException(
  JavaThread* thread, oopDesc* obj))

  ResourceMark rm(thread);
  char* message = SharedRuntime::generate_class_cast_message(
    thread, Klass::cast(obj->klass())->external_name());

  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_class_check, CHECK);
  }

  // create exception
  THROW_MSG(vmSymbols::java_lang_ClassCastException(), message);
IRT_END


// exception_handler_for_exception(...) returns the continuation address,
// the exception oop (via TLS) and sets the bci/bcp for the continuation.
// The exception oop is returned to make sure it is preserved over GC (it
// is only on the stack if the exception was thrown explicitly via athrow).
// During this operation, the expression stack contains the values for the
// bci where the exception happened. If the exception was propagated back
// from a call, the expression stack contains the values for the bci at the
// invoke w/o arguments (i.e., as if one were inside the call).
IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThread* thread, oopDesc* exception))

  Handle             h_exception(thread, exception);
  methodHandle       h_method   (thread, method(thread));
  constantPoolHandle h_constants(thread, h_method->constants());
  typeArrayHandle    h_extable  (thread, h_method->exception_table());
  bool               should_repeat;
  int                handler_bci;
  int                current_bci = bcp(thread) - h_method->code_base();

  // Need to do this check first since when _do_not_unlock_if_synchronized
  // is set, we don't want to trigger any classloading which may make calls
  // into java, or surprisingly find a matching exception handler for bci 0
  // since at this moment the method hasn't been "officially" entered yet.
  if (thread->do_not_unlock_if_synchronized()) {
    ResourceMark rm;
    assert(current_bci == 0, "bci isn't zero for do_not_unlock_if_synchronized");
    thread->set_vm_result(exception);
#ifdef CC_INTERP
    return (address) -1;
#else
    return Interpreter::remove_activation_entry();
#endif
  }

  do {
    should_repeat = false;

    // assertions
#ifdef ASSERT
    assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
    assert(h_exception->is_oop(), "just checking");
    // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
    if (!(h_exception->is_a(SystemDictionary::throwable_klass()))) {
      if (ExitVMOnVerifyError) vm_exit(-1);
      ShouldNotReachHere();
    }
#endif

    // tracing
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm(thread);
      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", h_exception->print_value_string(), (address)h_exception());
      tty->print_cr(" thrown in interpreter method <%s>", h_method->print_value_string());
      tty->print_cr(" at bci %d for thread " INTPTR_FORMAT, current_bci, thread);
    }
    // Don't go paging in something which won't be used.
    //     else if (h_extable->length() == 0) {
    //       // disabled for now - interpreter is not using shortcut yet
    //       // (shortcut is not to call runtime if we have no exception handlers)
    //       // warning("performance bug: should not call runtime if method has no exception handlers");
    //     }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(h_exception));

    // exception handler lookup
    KlassHandle h_klass(THREAD, h_exception->klass());
    handler_bci = h_method->fast_exception_handler_bci_for(h_klass, current_bci, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // We threw an exception while trying to find the exception handler.
      // Transfer the new exception to the exception handle which will
      // be set into thread local storage, and do another lookup for an
      // exception handler for this exception, this time starting at the
      // BCI of the exception handler which caused the exception to be
      // thrown (bug 4307310).
      h_exception = Handle(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      if (handler_bci >= 0) {
        current_bci = handler_bci;
        should_repeat = true;
      }
    }
  } while (should_repeat == true);

  // notify JVMTI of an exception throw; JVMTI will detect if this is a first
  // time throw or a stack unwinding throw and accordingly notify the debugger
  if (JvmtiExport::can_post_exceptions()) {
    JvmtiExport::post_exception_throw(thread, h_method(), bcp(thread), h_exception());
  }

#ifdef CC_INTERP
  address continuation = (address)(intptr_t) handler_bci;
#else
  address continuation = NULL;
#endif
  address handler_pc = NULL;
  if (handler_bci < 0 || !thread->reguard_stack((address) &continuation)) {
    // Forward exception to callee (leaving bci/bcp untouched) because (a) no
    // handler in this method, or (b) after a stack overflow there is not yet
    // enough stack space available to reprotect the stack.
#ifndef CC_INTERP
    continuation = Interpreter::remove_activation_entry();
#endif
    // Count this for compilation purposes
    h_method->interpreter_throwout_increment();
  } else {
    // handler in this method => change bci/bcp to handler bci/bcp and continue there
    handler_pc = h_method->code_base() + handler_bci;
#ifndef CC_INTERP
    set_bcp_and_mdp(handler_pc, thread);
    continuation = Interpreter::dispatch_table(vtos)[*handler_pc];
#endif
  }
  // notify debugger of an exception catch
  // (this is good for exceptions caught in native methods as well)
  if (JvmtiExport::can_post_exceptions()) {
    JvmtiExport::notice_unwind_due_to_exception(thread, h_method(), handler_pc, h_exception(), (handler_pc != NULL));
  }

  thread->set_vm_result(h_exception());
  return continuation;
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_pending_exception(JavaThread* thread))
  assert(thread->has_pending_exception(), "must only be called if there's an exception pending");
  // nothing to do - eventually we should remove this code entirely (see comments @ call sites)
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodError(JavaThread* thread))
  THROW(vmSymbols::java_lang_AbstractMethodError());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
  THROW(vmSymbols::java_lang_IncompatibleClassChangeError());
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Fields
//

IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode))
  // resolve field
  FieldAccessInfo info;
  constantPoolHandle pool(thread, method(thread)->constants());
  bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);

  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_field(info, pool, two_byte_index(thread),
                                bytecode, false, CHECK);
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  if (already_resolved(thread)) return;

  // compute auxiliary field attributes
  TosState state = as_TosState(info.field_type());

  // We need to delay resolving put instructions on final fields
  // until we actually invoke one. This is required so we throw
  // exceptions at the correct place. If we do not resolve completely
  // in the current pass, leaving the put_code set to zero will
  // cause the next put instruction to reresolve.
  bool is_put = (bytecode == Bytecodes::_putfield ||
                 bytecode == Bytecodes::_putstatic);
  Bytecodes::Code put_code = (Bytecodes::Code)0;

  // We also need to delay resolving getstatic instructions until the
  // class is initialized.  This is required so that access to the static
  // field will call the initialization function every time until the class
  // is completely initialized, as per section 2.17.5 of the JVM Specification.
  instanceKlass *klass = instanceKlass::cast(info.klass()->as_klassOop());
  bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
                               !klass->is_initialized());
  Bytecodes::Code get_code = (Bytecodes::Code)0;


  if (!uninitialized_static) {
    get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
    if (is_put || !info.access_flags().is_final()) {
      put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
    }
  }

  cache_entry(thread)->set_field(
    get_code,
    put_code,
    info.klass(),
    info.field_index(),
    info.field_offset(),
    state,
    info.access_flags().is_final(),
    info.access_flags().is_volatile()
  );
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Synchronization
//
// The interpreter's synchronization code is factored out so that it can
// be shared by method invocation and synchronized blocks.
//%note synchronization_3

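// Thin helper that forwards the interpreter's lock/unlock events to
// ObjectSynchronizer's tracing support.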
static void trace_locking(Handle& h_locking_obj, bool is_locking) {
  ObjectSynchronizer::trace_locking(h_locking_obj, false, true, is_locking);
}


//%note monitor_1
IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, BasicObjectLock* elem))
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(thread, elem->obj());
  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
         "must be NULL or an object");
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, elem->lock(), true, CHECK);
  } else {
    ObjectSynchronizer::slow_enter(h_obj, elem->lock(), CHECK);
  }
  assert(Universe::heap()->is_in_reserved_or_null(elem->obj()),
         "must be NULL or an object");
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
IRT_END


//%note monitor_1
IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorexit(JavaThread* thread, BasicObjectLock* elem))
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
  Handle h_obj(thread, elem->obj());
  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
         "must be NULL or an object");
  if (elem == NULL || h_obj()->is_unlocked()) {
    THROW(vmSymbols::java_lang_IllegalMonitorStateException());
  }
  ObjectSynchronizer::slow_exit(h_obj(), elem->lock(), thread);
  // Free entry. This must be done here, since a pending exception might be installed on
  // exit. If it is not cleared, the exception handling code will try to unlock the monitor again.
  elem->set_obj(NULL);
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* thread))
  THROW(vmSymbols::java_lang_IllegalMonitorStateException());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::new_illegal_monitor_state_exception(JavaThread* thread))
  // Returns an illegal exception to install into the current thread. The
  // pending_exception flag is cleared so normal exception handling does not
  // trigger. Any currently installed exception will be overwritten. This
  // method will be called during an exception unwind.

  assert(!HAS_PENDING_EXCEPTION, "no pending exception");
  Handle exception(thread, thread->vm_result());
  assert(exception() != NULL, "vm result should be set");
  thread->set_vm_result(NULL); // clear vm result before continuing (may cause memory leaks and assert failures)
  if (!exception->is_a(SystemDictionary::threaddeath_klass())) {
    exception = get_preinitialized_exception(
                       SystemDictionary::IllegalMonitorStateException_klass(),
                       CATCH);
  }
  thread->set_vm_result(exception());
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Invokes

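// The next three entries support JVMTI breakpoints: they let the VM read
// back and replace the original bytecode that a _breakpoint instruction
// displaced, and post the breakpoint event itself.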
IRT_ENTRY(Bytecodes::Code, InterpreterRuntime::get_original_bytecode_at(JavaThread* thread, methodOopDesc* method, address bcp))
  return method->orig_bytecode_at(method->bci_from(bcp));
IRT_END

IRT_ENTRY(void, InterpreterRuntime::set_original_bytecode_at(JavaThread* thread, methodOopDesc* method, address bcp, Bytecodes::Code new_code))
  method->set_orig_bytecode_at(method->bci_from(bcp), new_code);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::_breakpoint(JavaThread* thread, methodOopDesc* method, address bcp))
  JvmtiExport::post_raw_breakpoint(thread, method, bcp);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode))
  // extract receiver from the outgoing argument list if necessary
  Handle receiver(thread, NULL);
  if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) {
    ResourceMark rm(thread);
    methodHandle m (thread, method(thread));
    int bci = m->bci_from(bcp(thread));
    Bytecode_invoke* call = Bytecode_invoke_at(m, bci);
    symbolHandle signature (thread, call->signature());
    receiver = Handle(thread,
                  thread->last_frame().interpreter_callee_receiver(signature));
    assert(Universe::heap()->is_in_reserved_or_null(receiver()),
           "sanity check");
    assert(receiver.is_null() ||
           Universe::heap()->is_in_reserved(receiver->klass()),
           "sanity check");
  }

  // resolve method
  CallInfo info;
  constantPoolHandle pool(thread, method(thread)->constants());

  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_invoke(info, receiver, pool,
                                 two_byte_index(thread), bytecode, CHECK);
    if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
      int retry_count = 0;
      while (info.resolved_method()->is_old()) {
        // It is very unlikely that a method is redefined more than 100 times
        // in the middle of resolution.  If this loops here more than 100
        // times, there is probably a bug.
        guarantee((retry_count++ < 100),
                  "Could not resolve to latest version of redefined method");
        // method was redefined in the middle of resolve, so retry
        LinkResolver::resolve_invoke(info, receiver, pool,
                                     two_byte_index(thread), bytecode, CHECK);
      }
    }
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  if (already_resolved(thread)) return;

  if (bytecode == Bytecodes::_invokeinterface) {

    if (TraceItables && Verbose) {
      ResourceMark rm(thread);
      tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
    }
    if (info.resolved_method()->method_holder() ==
                                            SystemDictionary::object_klass()) {
      // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
      // (see also cpCacheOop.cpp for details)
      methodHandle rm = info.resolved_method();
      assert(rm->is_final() || info.has_vtable_index(),
             "should have been set already");
      cache_entry(thread)->set_method(bytecode, rm, info.vtable_index());
    } else {
      // Setup itable entry
      int index = klassItable::compute_itable_index(info.resolved_method()());
      cache_entry(thread)->set_interface_call(info.resolved_method(), index);
    }
  } else {
    cache_entry(thread)->set_method(
      bytecode,
      info.resolved_method(),
      info.vtable_index());
  }
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Miscellaneous


#ifndef PRODUCT
static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci, address branch_bcp) {
  if (TraceInvocationCounterOverflow) {
    InvocationCounter* ic = m->invocation_counter();
    InvocationCounter* bc = m->backedge_counter();
    ResourceMark rm;
    const char* msg =
      branch_bcp == NULL
      ? "comp-policy cntr ovfl @ %d in entry of "
      : "comp-policy cntr ovfl @ %d in loop of ";
    tty->print(msg, bci);
    m->print_value();
    tty->cr();
    ic->print();
    bc->print();
    if (ProfileInterpreter) {
      if (branch_bcp != NULL) {
        methodDataOop mdo = m->method_data();
        if (mdo != NULL) {
          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
          tty->print_cr("back branch count = %d", count);
        }
      }
    }
  }
}

static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
  if (TraceOnStackReplacement) {
    ResourceMark rm;
    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
    method->print_short_name(tty);
    tty->print_cr(" at bci %d", bci);
  }
}
#endif // !PRODUCT

IRT_ENTRY(nmethod*,
          InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);

  frame fr = thread->last_frame();
  assert(fr.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(thread, fr.interpreter_frame_method());
  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : 0;
  const int bci = method->bci_from(fr.interpreter_frame_bcp());
  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci, branch_bcp);)

  if (JvmtiExport::can_post_interpreter_events()) {
    if (thread->is_interp_only_mode()) {
      // If certain JVMTI events (e.g. frame pop event) are requested then the
      // thread is forced to remain in interpreted code. This is
      // implemented partly by a check in the run_compiled_code
      // section of the interpreter whether we should skip running
      // compiled code, and partly by skipping OSR compiles for
      // interpreted-only threads.
      if (branch_bcp != NULL) {
        CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
        return NULL;
      }
    }
  }

  if (branch_bcp == NULL) {
    // When the code cache is full, compilation is switched off and
    // UseCompiler is set to false.
    if (!method->has_compiled_code() && UseCompiler) {
      CompilationPolicy::policy()->method_invocation_event(method, CHECK_NULL);
    } else {
      // Force counter overflow on method entry, even if no compilation
      // happened.  (The method_invocation_event call does this also.)
      CompilationPolicy::policy()->reset_counter_for_invocation_event(method);
    }
    // Compilation at an invocation overflow no longer goes back and retries
    // the test for a compiled method.  We always run the loser of the race
    // as interpreted, so return NULL.
    return NULL;
  } else {
    // counter overflow in a loop => try to do on-stack-replacement
    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci);
    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci);)
    // when code cache is full, we should not compile any more...
    if (osr_nm == NULL && UseCompiler) {
      const int branch_bci = method->bci_from(branch_bcp);
      CompilationPolicy::policy()->method_back_branch_event(method, branch_bci, bci, CHECK_NULL);
      osr_nm = method->lookup_osr_nmethod_for(bci);
    }
    if (osr_nm == NULL) {
      CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
      return NULL;
    } else {
      // We may need to do on-stack replacement which requires that no
      // monitors in the activation are biased because their
      // BasicObjectLocks will need to migrate during OSR. Force
      // unbiasing of all monitors in the activation now (even though
      // the OSR nmethod might be invalidated) because we don't have a
      // safepoint opportunity later once the migration begins.
      if (UseBiasedLocking) {
        ResourceMark rm;
        GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
        for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
             kptr < fr.interpreter_frame_monitor_begin();
             kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
          if( kptr->obj() != NULL ) {
            objects_to_revoke->append(Handle(THREAD, kptr->obj()));
          }
        }
        BiasedLocking::revoke(objects_to_revoke);
      }

      return osr_nm;
    }
  }
IRT_END

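// Map the current bytecode pointer to a data index (di) in the method's
// profiling data; returns 0 if no methodDataOop has been allocated yet.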
IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(methodOopDesc* method, address cur_bcp))
  assert(ProfileInterpreter, "must be profiling interpreter");
  int bci = method->bci_from(cur_bcp);
  methodDataOop mdo = method->method_data();
  if (mdo == NULL)  return 0;
  return mdo->bci_to_di(bci);
IRT_END

IRT_ENTRY(jint, InterpreterRuntime::profile_method(JavaThread* thread, address cur_bcp))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);

  assert(ProfileInterpreter, "must be profiling interpreter");
  frame fr = thread->last_frame();
  assert(fr.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(thread, fr.interpreter_frame_method());
  int bci = method->bci_from(cur_bcp);
  methodOopDesc::build_interpreter_method_data(method, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
    CLEAR_PENDING_EXCEPTION;
    // and fall through...
  }
  methodDataOop mdo = method->method_data();
  if (mdo == NULL)  return 0;
  return mdo->bci_to_di(bci);
IRT_END


#ifdef ASSERT
IRT_LEAF(void, InterpreterRuntime::verify_mdp(methodOopDesc* method, address bcp, address mdp))
  assert(ProfileInterpreter, "must be profiling interpreter");

  methodDataOop mdo = method->method_data();
  assert(mdo != NULL, "must not be null");

  int bci = method->bci_from(bcp);

  address mdp2 = mdo->bci_to_dp(bci);
  if (mdp != mdp2) {
    ResourceMark rm;
    ResetNoHandleMark rnm; // In a LEAF entry.
    HandleMark hm;
    tty->print_cr("FAILED verify : actual mdp %p   expected mdp %p @ bci %d", mdp, mdp2, bci);
    int current_di = mdo->dp_to_di(mdp);
    int expected_di = mdo->dp_to_di(mdp2);
    tty->print_cr("  actual di %d   expected di %d", current_di, expected_di);
    int expected_approx_bci = mdo->data_at(expected_di)->bci();
    int approx_bci = -1;
    if (current_di >= 0) {
      approx_bci = mdo->data_at(current_di)->bci();
    }
    tty->print_cr("  actual bci is %d  expected bci %d", approx_bci, expected_approx_bci);
    mdo->print_on(tty);
    method->print_codes();
  }
  assert(mdp == mdp2, "wrong mdp");
IRT_END
#endif // ASSERT

IRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* thread, int return_bci))
  assert(ProfileInterpreter, "must be profiling interpreter");
  ResourceMark rm(thread);
  HandleMark hm(thread);
  frame fr = thread->last_frame();
  assert(fr.is_interpreted_frame(), "must come from interpreter");
  methodDataHandle h_mdo(thread, fr.interpreter_frame_method()->method_data());

  // Grab a lock to ensure atomic access to setting the return bci and
  // the displacement.  This can block and GC, invalidating all naked oops.
  MutexLocker ml(RetData_lock);

  // ProfileData is essentially a wrapper around a derived oop, so we
  // need to take the lock before making any ProfileData structures.
  ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(fr.interpreter_frame_mdp()));
  RetData* rdata = data->as_RetData();
  address new_mdp = rdata->fixup_ret(return_bci, h_mdo);
  fr.interpreter_frame_set_mdp(new_mdp);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread))
  // We used to need an explicit preserve_arguments here for invoke bytecodes. However,
  // stack traversal automatically takes care of preserving arguments for invoke, so
  // this is no longer needed.

  // IRT_END does an implicit safepoint check, hence we are guaranteed to block
  // if this is called during a safepoint

  if (JvmtiExport::should_post_single_step()) {
    // We are called during regular safepoints and when the VM is
    // single stepping. If any thread is marked for single stepping,
    // then we may have JVMTI work to do.
    JvmtiExport::at_single_stepping_point(thread, method(thread), bcp(thread));
  }
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread *thread, oopDesc* obj,
                                                      ConstantPoolCacheEntry *cp_entry))

  // check the access_flags for the field in the klass
  instanceKlass* ik = instanceKlass::cast((klassOop)cp_entry->f1());
  typeArrayOop fields = ik->fields();
  int index = cp_entry->field_index();
  assert(index < fields->length(), "holder's field index is out of range");
  // bail out if field accesses are not watched
  if ((fields->ushort_at(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return;

  switch(cp_entry->flag_state()) {
    case btos:    // fall through
    case ctos:    // fall through
    case stos:    // fall through
    case itos:    // fall through
    case ftos:    // fall through
    case ltos:    // fall through
    case dtos:    // fall through
    case atos: break;
    default: ShouldNotReachHere(); return;
  }
  bool is_static = (obj == NULL);
  HandleMark hm(thread);

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(thread, obj);
  }
  instanceKlassHandle h_cp_entry_f1(thread, (klassOop)cp_entry->f1());
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2(), is_static);
  JvmtiExport::post_field_access(thread, method(thread), bcp(thread), h_cp_entry_f1, h_obj, fid);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread,
                                                            oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value))

  klassOop k = (klassOop)cp_entry->f1();

  // check the access_flags for the field in the klass
  instanceKlass* ik = instanceKlass::cast(k);
  typeArrayOop fields = ik->fields();
  int index = cp_entry->field_index();
  assert(index < fields->length(), "holder's field index is out of range");
  // bail out if field modifications are not watched
  if ((fields->ushort_at(index) & JVM_ACC_FIELD_MODIFICATION_WATCHED) == 0) return;

  char sig_type = '\0';

  switch(cp_entry->flag_state()) {
    case btos: sig_type = 'Z'; break;
    case ctos: sig_type = 'C'; break;
    case stos: sig_type = 'S'; break;
    case itos: sig_type = 'I'; break;
    case ftos: sig_type = 'F'; break;
    case atos: sig_type = 'L'; break;
    case ltos: sig_type = 'J'; break;
    case dtos: sig_type = 'D'; break;
    default: ShouldNotReachHere(); return;
  }
  bool is_static = (obj == NULL);

  HandleMark hm(thread);
  instanceKlassHandle h_klass(thread, k);
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_klass, cp_entry->f2(), is_static);
  jvalue fvalue;
#ifdef _LP64
  fvalue = *value;
#else
  // Long/double values are stored unaligned and also noncontiguously with
  // tagged stacks.  We can't just do a simple assignment even in the non-
  // J/D cases because a C++ compiler is allowed to assume that a jvalue is
  // 8-byte aligned, and interpreter stack slots are only 4-byte aligned.
  // We assume that the two halves of longs/doubles are stored in interpreter
  // stack slots in platform-endian order.
  jlong_accessor u;
  jint* newval = (jint*)value;
  u.words[0] = newval[0];
  u.words[1] = newval[Interpreter::stackElementWords()]; // skip if tag
  fvalue.j = u.long_value;
#endif // _LP64

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(thread, obj);
  }

  JvmtiExport::post_raw_field_modification(thread, method(thread), bcp(thread), h_klass, h_obj,
                                           fid, sig_type, &fvalue);
IRT_END

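// Post JVMTI method entry/exit events for the method executing in the
// current interpreter frame.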
IRT_ENTRY(void, InterpreterRuntime::post_method_entry(JavaThread *thread))
  JvmtiExport::post_method_entry(thread, InterpreterRuntime::method(thread), InterpreterRuntime::last_frame(thread));
IRT_END


IRT_ENTRY(void, InterpreterRuntime::post_method_exit(JavaThread *thread))
  JvmtiExport::post_method_exit(thread, InterpreterRuntime::method(thread), InterpreterRuntime::last_frame(thread));
IRT_END

IRT_LEAF(int, InterpreterRuntime::interpreter_contains(address pc))
{
  return (Interpreter::contains(pc) ? 1 : 0);
}
IRT_END


// Implementation of SignatureHandlerLibrary

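// Allocate a fresh BufferBlob to hold generated signature handlers and make
// it the current blob; returns NULL if the code cache allocation fails.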
address SignatureHandlerLibrary::set_handler_blob() {
  BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
  if (handler_blob == NULL) {
    return NULL;
  }
  address handler = handler_blob->instructions_begin();
  _handler_blob = handler_blob;
  _handler = handler;
  return handler;
}

void SignatureHandlerLibrary::initialize() {
  if (_fingerprints != NULL) {
    return;
  }
  if (set_handler_blob() == NULL) {
    vm_exit_out_of_memory(blob_size, "native signature handlers");
  }

  BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
                                      SignatureHandlerLibrary::buffer_size);
  _buffer = bb->instructions_begin();

  _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true);
  _handlers     = new(ResourceObj::C_HEAP)GrowableArray<address>(32, true);
}

address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
  address handler   = _handler;
  int     code_size = buffer->pure_code_size();
  if (handler + code_size > _handler_blob->instructions_end()) {
    // get a new handler blob
    handler = set_handler_blob();
  }
  if (handler != NULL) {
    memcpy(handler, buffer->code_begin(), code_size);
    pd_set_handler(handler);
    ICache::invalidate_range(handler, code_size);
    _handler = handler + code_size;
  }
  return handler;
}

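// Install a signature handler for the given method: reuse a previously
// generated handler with the same fingerprint when possible, generate a new
// one otherwise, and fall back to the generic slow handler when fast
// handlers are disabled or the signature has too many parameters.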
void SignatureHandlerLibrary::add(methodHandle method) {
  if (method->signature_handler() == NULL) {
    // use slow signature handler if we can't do better
    int handler_index = -1;
    // check if we can use customized (fast) signature handler
    if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::max_size_of_parameters) {
      // use customized signature handler
      MutexLocker mu(SignatureHandlerLibrary_lock);
      // make sure data structure is initialized
      initialize();
      // lookup method signature's fingerprint
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      handler_index = _fingerprints->find(fingerprint);
      // create handler if necessary
      if (handler_index < 0) {
        ResourceMark rm;
        ptrdiff_t align_offset = (address)
          round_to((intptr_t)_buffer, CodeEntryAlignment) - (address)_buffer;
        CodeBuffer buffer((address)(_buffer + align_offset),
                          SignatureHandlerLibrary::buffer_size - align_offset);
        InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
        // copy into code heap
        address handler = set_handler(&buffer);
        if (handler == NULL) {
          // use slow signature handler
        } else {
          // debugging support
          if (PrintSignatureHandlers) {
            tty->cr();
            tty->print_cr("argument handler #%d for: %s %s (fingerprint = " UINT64_FORMAT ", %d bytes generated)",
                          _handlers->length(),
                          (method->is_static() ? "static" : "receiver"),
                          method->name_and_sig_as_C_string(),
                          fingerprint,
                          buffer.code_size());
            Disassembler::decode(handler, handler + buffer.code_size());
#ifndef PRODUCT
            tty->print_cr(" --- associated result handler ---");
            address rh_begin = Interpreter::result_handler(method()->result_type());
            address rh_end = rh_begin;
            while (*(int*)rh_end != 0) {
              rh_end += sizeof(int);
            }
            Disassembler::decode(rh_begin, rh_end);
#endif
          }
          // add handler to library
          _fingerprints->append(fingerprint);
          _handlers->append(handler);
          // set handler index
          assert(_fingerprints->length() == _handlers->length(), "sanity check");
          handler_index = _fingerprints->length() - 1;
        }
      }
    } else {
      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
    }
    if (handler_index < 0) {
      // use generic signature handler
      method->set_signature_handler(Interpreter::slow_signature_handler());
    } else {
      // set handler
      method->set_signature_handler(_handlers->at(handler_index));
    }
  }
  assert(method->signature_handler() == Interpreter::slow_signature_handler() ||
         _handlers->find(method->signature_handler()) == _fingerprints->find(Fingerprinter(method).fingerprint()),
         "sanity check");
}


BufferBlob*              SignatureHandlerLibrary::_handler_blob = NULL;
address                  SignatureHandlerLibrary::_handler      = NULL;
GrowableArray<uint64_t>* SignatureHandlerLibrary::_fingerprints = NULL;
GrowableArray<address>*  SignatureHandlerLibrary::_handlers     = NULL;
address                  SignatureHandlerLibrary::_buffer       = NULL;


IRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* thread, methodOopDesc* method))
  methodHandle m(thread, method);
  assert(m->is_native(), "sanity check");
  // lookup native function entry point if it doesn't exist
  bool in_base_library;
  if (!m->has_native_function()) {
    NativeLookup::lookup(m, in_base_library, CHECK);
  }
  // make sure signature handler is installed
  SignatureHandlerLibrary::add(m);
  // The interpreter entry point checks the signature handler first,
  // before trying to fetch the native entry point and klass mirror.
  // We must set the signature handler last, so that multiple processors
  // preparing the same method will be sure to see non-null entry & mirror.
IRT_END

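// JVMTI PopFrame support: copy the invoke's outgoing arguments (including
// the receiver for non-static calls) from src_address to dest_address, so
// the call can be re-executed once the frame has been popped.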
#if defined(IA32) || defined(AMD64)
IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address))
  if (src_address == dest_address) {
    return;
  }
  ResetNoHandleMark rnm; // In a LEAF entry.
  HandleMark hm;
  ResourceMark rm;
  frame fr = thread->last_frame();
  assert(fr.is_interpreted_frame(), "");
  jint bci = fr.interpreter_frame_bci();
  methodHandle mh(thread, fr.interpreter_frame_method());
  Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci);
  ArgumentSizeComputer asc(invoke->signature());
  int size_of_arguments = (asc.size() + (invoke->is_invokestatic() ? 0 : 1)); // receiver
  Copy::conjoint_bytes(src_address, dest_address,
                       size_of_arguments * Interpreter::stackElementSize());
IRT_END
#endif