comparison: src/share/vm/runtime/sharedRuntime.cpp @ 20804:7848fc12602b

Merge with jdk8u40-b25

author:   Gilles Duboscq <gilles.m.duboscq@oracle.com>
date:     Tue, 07 Apr 2015 14:58:49 +0200
parents:  4a1b4400bb1f d3f3f7677537
children: a560c9b81f0f
--- src/share/vm/runtime/sharedRuntime.cpp    20184:84105dcdb05b
+++ src/share/vm/runtime/sharedRuntime.cpp    20804:7848fc12602b
@@ -480,10 +480,11 @@
 // The continuation address is the entry point of the exception handler of the
 // previous frame depending on the return address.
 
 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
   assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address));
+  assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
 
   // Reset method handle flag.
   thread->set_is_method_handle_return(false);
 
 #ifdef GRAAL
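The new assert ties two pieces of deoptimization state together: as I read it, if the deoptimizer has recorded interpreter frames to pop after a failed reallocation of scalar-replaced objects, the exception must be unwinding through interpreter code, so the return address has to lie in the interpreter's code range. A minimal standalone sketch of that invariant's shape (ToyInterpreterRange and every name here are invented for illustration, not HotSpot types):

#include <cassert>

// Toy stand-in for Interpreter::contains(): a flat code range.
struct ToyInterpreterRange {
  const char* begin;
  const char* end;
  bool contains(const char* pc) const { return pc >= begin && pc < end; }
};

int main() {
  char interp_code[64];  // pretend this buffer is the interpreter's code area
  ToyInterpreterRange interpreter{interp_code, interp_code + sizeof interp_code};

  int frames_to_pop_failed_realloc = 1;          // deoptimizer left frames to pop
  const char* return_address = interp_code + 8;  // unwinding through the interpreter

  // Shape of the assert added in the hunk above: either no frames are pending,
  // or the return address must be interpreted code.
  assert(frames_to_pop_failed_realloc == 0 || interpreter.contains(return_address));
  return 0;
}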
@@ -1278,14 +1279,11 @@
   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
 
-  // We do not patch the call site if the caller nmethod has been made non-entrant.
-  if (!caller_nm->is_in_use()) {
-    return callee_method;
-  }
+  assert(caller_nm->is_alive(), "It should be alive");
 
 #ifndef PRODUCT
   // tracing/debugging/statistics
   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
               (is_virtual) ? (&_resolve_virtual_ctr) :
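The removed early return and the new assert both hinge on the nmethod life cycle: an nmethod that has been made non-entrant is still alive (frames may still be executing it); it just must not be entered by new calls. A simplified, self-contained model of that distinction (illustrative only; the real state machine lives in nmethod.cpp and has more states and transitions):

#include <cassert>

// State names follow HotSpot's nmethod states, but this type is a toy.
enum class NMethodState { in_use, not_entrant, zombie, unloaded };

struct ToyNMethod {
  NMethodState state = NMethodState::in_use;

  // "Alive" covers both entrant and non-entrant code.
  bool is_alive() const {
    return state == NMethodState::in_use || state == NMethodState::not_entrant;
  }
  // "In use" is strictly narrower: new calls may still enter it.
  bool is_in_use() const { return state == NMethodState::in_use; }
};

int main() {
  ToyNMethod caller;
  caller.state = NMethodState::not_entrant;
  // The old code returned early for a non-entrant caller; the new code only
  // asserts liveness here and leaves is_in_use() checks to the patch sites.
  assert(caller.is_alive());
  assert(!caller.is_in_use());
  return 0;
}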
@@ -1351,17 +1349,15 @@
 
   // Lock blocks for safepoint during which both nmethods can change state.
 
   // Now that we are ready to patch if the Method* was redefined then
   // don't update call site and let the caller retry.
-  // Don't update call site if caller nmethod has been made non-entrant
-  // as it is a waste of time.
   // Don't update call site if callee nmethod was unloaded or deoptimized.
   // Don't update call site if callee nmethod was replaced by an other nmethod
   // which may happen when multiply alive nmethod (tiered compilation)
   // will be supported.
-  if (!callee_method->is_old() && caller_nm->is_in_use() &&
+  if (!callee_method->is_old() &&
       (callee_nm == NULL || callee_nm->is_in_use() && (callee_method->code() == callee_nm))) {
 #ifdef ASSERT
     // We must not try to patch to jump to an already unloaded method.
     if (dest_entry_point != 0) {
       CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
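One subtlety worth calling out in the surviving guard: the second operand, callee_nm == NULL || callee_nm->is_in_use() && (callee_method->code() == callee_nm), relies on && binding tighter than ||. A hypothetical stand-in showing the truth table the guard implements (guard() and its parameters are invented names):

#include <cassert>
#include <cstddef>

// a || b && c parses as a || (b && c); same shape as the patch guard above.
static bool guard(const int* callee_nm, bool in_use, bool code_matches) {
  return callee_nm == NULL || in_use && code_matches;  // as written in the diff
}

int main() {
  int nm = 0;
  assert(guard(NULL, false, false));  // no callee nmethod: patching allowed
  assert(guard(&nm, true, true));     // live and still current: patching allowed
  assert(!guard(&nm, true, false));   // replaced by another nmethod: skip
  assert(!guard(&nm, false, true));   // not in use: skip
  return 0;
}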
@@ -1558,18 +1554,16 @@
   // event can't be posted when the stub is created as locks are held
   // - instead the event will be deferred until the event collector goes
   // out of scope.
   JvmtiDynamicCodeEventCollector event_collector;
 
-  // Update inline cache to megamorphic. Skip update if caller has been
-  // made non-entrant or we are called from interpreted.
+  // Update inline cache to megamorphic. Skip update if we are called from interpreted.
   { MutexLocker ml_patch (CompiledIC_lock);
     RegisterMap reg_map(thread, false);
     frame caller_frame = thread->last_frame().sender(&reg_map);
     CodeBlob* cb = caller_frame.cb();
-    if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
-      // Not a non-entrant nmethod, so find inline_cache
+    if (cb->is_nmethod()) {
       CompiledIC* inline_cache = CompiledIC_before(((nmethod*)cb), caller_frame.pc());
       bool should_be_mono = false;
       if (inline_cache->is_optimized()) {
         if (TraceCallFixup) {
           ResourceMark rm(thread);
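For context on "update inline cache to megamorphic": a call site's inline cache starts clean, caches the first receiver type seen (monomorphic), and falls back to generic dispatch once a second type shows up. A toy model of those transitions (the real CompiledIC patches machine code under CompiledIC_lock; none of that is modeled here, and all names are invented):

#include <cassert>

enum class ICState { clean, monomorphic, megamorphic };

struct ToyInlineCache {
  ICState state = ICState::clean;
  const void* cached_receiver_klass = nullptr;

  void on_call(const void* receiver_klass) {
    switch (state) {
      case ICState::clean:
        // First call: cache the receiver type and dispatch directly from now on.
        cached_receiver_klass = receiver_klass;
        state = ICState::monomorphic;
        break;
      case ICState::monomorphic:
        // A second receiver type misses the cache: go megamorphic and fall
        // back to a shared vtable/itable dispatch stub.
        if (receiver_klass != cached_receiver_klass) {
          state = ICState::megamorphic;
        }
        break;
      case ICState::megamorphic:
        break;  // already on the generic dispatch path
    }
  }
};

int main() {
  ToyInlineCache ic;
  int klass_a, klass_b;
  ic.on_call(&klass_a);
  assert(ic.state == ICState::monomorphic);
  ic.on_call(&klass_a);                       // same type: cache hit
  assert(ic.state == ICState::monomorphic);
  ic.on_call(&klass_b);                       // miss: promote to megamorphic
  assert(ic.state == ICState::megamorphic);
  return 0;
}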
@@ -1708,23 +1702,17 @@
   // leads to very hard to track down bugs, if an inline cache gets updated
   // to a wrong method). It should not be performance critical, since the
   // resolve is only done once.
 
   MutexLocker ml(CompiledIC_lock);
-  //
-  // We do not patch the call site if the nmethod has been made non-entrant
-  // as it is a waste of time
-  //
-  if (caller_nm->is_in_use()) {
-    if (is_static_call) {
-      CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
-      ssc->set_to_clean();
-    } else {
-      // compiled, dispatched call (which used to call an interpreted method)
-      CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
-      inline_cache->set_to_clean();
-    }
+  if (is_static_call) {
+    CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
+    ssc->set_to_clean();
+  } else {
+    // compiled, dispatched call (which used to call an interpreted method)
+    CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
+    inline_cache->set_to_clean();
+  }
   }
   }
 
 }
 
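What set_to_clean() buys in the new, unconditional path: a cleaned call site routes its next call back through the resolution machinery, which re-binds the target, so cleaning a possibly stale site costs at most one extra resolve. A toy sketch of that round trip, with invented names (the real stub is patched machine code, not a null check):

#include <cassert>

using Target = int (*)();

static int resolved_impl() { return 42; }

struct ToyCallSite {
  Target target = nullptr;  // nullptr plays the role of the resolve stub

  void set_to_clean() { target = nullptr; }

  int invoke() {
    if (target == nullptr) {
      // "Resolve stub": look the callee up again and patch the site.
      target = &resolved_impl;
    }
    return target();
  }
};

int main() {
  ToyCallSite site;
  assert(site.invoke() == 42);  // first call resolves and patches the site
  site.set_to_clean();          // e.g. the callee was deoptimized or replaced
  assert(site.invoke() == 42);  // next call re-resolves instead of jumping to stale code
  return 0;
}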