Mercurial > hg > truffle
diff src/share/vm/runtime/deoptimization.cpp @ 5130:ab038e0d6b43
Merge
author | Lukas Stadler <lukas.stadler@jku.at> |
---|---|
date | Wed, 21 Mar 2012 11:28:22 +0100 |
parents | 51111665eda6 34518fd74518 |
children | 667c7bc2435b |
line wrap: on
line diff
--- a/src/share/vm/runtime/deoptimization.cpp Wed Mar 21 10:47:02 2012 +0100 +++ b/src/share/vm/runtime/deoptimization.cpp Wed Mar 21 11:28:22 2012 +0100 @@ -1151,17 +1151,17 @@ } -void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr) { +void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) { assert(fr.can_be_deoptimized(), "checking frame type"); - gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal); + gather_statistics(reason, Action_none, Bytecodes::_illegal); // Patch the nmethod so that when execution returns to it we will // deopt the execution state and return to the interpreter. fr.deoptimize(thread); } -void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) { +void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) { // Deoptimize only if the frame comes from compile code. // Do not deoptimize the frame which is already patched // during the execution of the loops below. @@ -1173,12 +1173,12 @@ if (UseBiasedLocking) { revoke_biases_of_monitors(thread, fr, map); } - deoptimize_single_frame(thread, fr); + deoptimize_single_frame(thread, fr, reason); } -void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id) { +void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) { assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(), "can only deoptimize other thread at a safepoint"); // Compute frame and register map based on thread and sp. 
@@ -1187,15 +1187,15 @@ while (fr.id() != id) { fr = fr.sender(&reg_map); } - deoptimize(thread, fr, &reg_map); + deoptimize(thread, fr, &reg_map, reason); } -void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) { +void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) { if (thread == Thread::current()) { - Deoptimization::deoptimize_frame_internal(thread, id); + Deoptimization::deoptimize_frame_internal(thread, id, reason); } else { - VM_DeoptimizeFrame deopt(thread, id); + VM_DeoptimizeFrame deopt(thread, id, reason); VMThread::execute(&deopt); } } @@ -1520,7 +1520,7 @@ uint this_trap_count = 0; bool maybe_prior_trap = false; bool maybe_prior_recompile = false; - pdata = query_update_method_data(trap_mdo, trap_bci, reason, + pdata = query_update_method_data(trap_mdo, trap_bci, reason, true, //outputs: this_trap_count, maybe_prior_trap, @@ -1654,25 +1654,31 @@ Deoptimization::query_update_method_data(methodDataHandle trap_mdo, int trap_bci, Deoptimization::DeoptReason reason, + bool update_total_trap_count, //outputs: uint& ret_this_trap_count, bool& ret_maybe_prior_trap, bool& ret_maybe_prior_recompile) { - uint prior_trap_count = trap_mdo->trap_count(reason); - uint this_trap_count = trap_mdo->inc_trap_count(reason); + bool maybe_prior_trap = false; + bool maybe_prior_recompile = false; + uint this_trap_count = 0; + if (update_total_trap_count) { + uint prior_trap_count = trap_mdo->trap_count(reason); + this_trap_count = trap_mdo->inc_trap_count(reason); - // If the runtime cannot find a place to store trap history, - // it is estimated based on the general condition of the method. - // If the method has ever been recompiled, or has ever incurred - // a trap with the present reason , then this BCI is assumed - // (pessimistically) to be the culprit. 
- bool maybe_prior_trap = (prior_trap_count != 0); - bool maybe_prior_recompile = (trap_mdo->decompile_count() != 0); - ProfileData* pdata = NULL; - + // If the runtime cannot find a place to store trap history, + // it is estimated based on the general condition of the method. + // If the method has ever been recompiled, or has ever incurred + // a trap with the present reason , then this BCI is assumed + // (pessimistically) to be the culprit. + maybe_prior_trap = (prior_trap_count != 0); + maybe_prior_recompile = (trap_mdo->decompile_count() != 0); + } // For reasons which are recorded per bytecode, we check per-BCI data. + ProfileData* pdata = NULL; DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); + assert(per_bc_reason != Reason_none || update_total_trap_count, "must be"); if (per_bc_reason != Reason_none) { // Find the profile data for this BCI. If there isn't one, // try to allocate one from the MDO's set of spares. @@ -1717,8 +1723,11 @@ uint ignore_this_trap_count; bool ignore_maybe_prior_trap; bool ignore_maybe_prior_recompile; + // Graal uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts + bool update_total_counts = GRAAL_ONLY(false) NOT_GRAAL(true); query_update_method_data(trap_mdo, trap_bci, (DeoptReason)reason, + update_total_counts, ignore_this_trap_count, ignore_maybe_prior_trap, ignore_maybe_prior_recompile); @@ -1831,6 +1840,21 @@ Deoptimization::DeoptAction Deoptimization::_unloaded_action = Deoptimization::Action_reinterpret; const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = { +#ifdef GRAAL + "none", + "null_check", + "range_check", + "class_check", + "array_check", + "unreached", + "type_checked_inlining", + "optimized_type_check", + "not_compiled_exception_handler", + "unresolved", + "jsr_mismatch", + "div0_check", + "constraint" +#else // Note: Keep this in sync. with enum DeoptReason. 
"none", "null_check", @@ -1849,6 +1873,7 @@ "age", "predicate", "loop_limit_check" +#endif }; const char* Deoptimization::_trap_action_name[Action_LIMIT] = { // Note: Keep this in sync. with enum DeoptAction.