# HG changeset patch
# User vlivanov
# Date 1360268631 28800
# Node ID d05ff4bf41b34c986b444d94853072b14f460d7e
# Parent  454d7cc622ab7b9ad47748c097c35739ef89680b
# Parent  4fcf990aa34afaf00cfdba93ce604307c890c3a6
Merge

diff -r 454d7cc622ab -r d05ff4bf41b3 agent/src/os/linux/LinuxDebuggerLocal.c
--- a/agent/src/os/linux/LinuxDebuggerLocal.c Wed Feb 06 15:22:32 2013 -0800
+++ b/agent/src/os/linux/LinuxDebuggerLocal.c Thu Feb 07 12:23:51 2013 -0800
@@ -280,7 +280,7 @@
   return (err == PS_OK)? array : 0;
 }
 
-#if defined(i386) || defined(ia64) || defined(amd64) || defined(sparc) || defined(sparcv9)
+#if defined(i386) || defined(amd64) || defined(sparc) || defined(sparcv9)
 JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0
   (JNIEnv *env, jobject this_obj, jint lwp_id) {
 
@@ -299,9 +299,6 @@
 #ifdef i386
 #define NPRGREG sun_jvm_hotspot_debugger_x86_X86ThreadContext_NPRGREG
 #endif
-#ifdef ia64
-#define NPRGREG IA64_REG_COUNT
-#endif
 #ifdef amd64
 #define NPRGREG sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_NPRGREG
 #endif
@@ -336,13 +333,6 @@
 
 #endif /* i386 */
 
-#if ia64
-  regs = (*env)->GetLongArrayElements(env, array, &isCopy);
-  for (i = 0; i < NPRGREG; i++ ) {
-    regs[i] = 0xDEADDEAD;
-  }
-#endif /* ia64 */
-
 #ifdef amd64
 #define REG_INDEX(reg) sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_##reg
diff -r 454d7cc622ab -r d05ff4bf41b3 agent/src/os/linux/libproc.h
--- a/agent/src/os/linux/libproc.h Wed Feb 06 15:22:32 2013 -0800
+++ b/agent/src/os/linux/libproc.h Thu Feb 07 12:23:51 2013 -0800
@@ -79,14 +79,6 @@
 *************************************************************************************/
 
-#ifdef ia64
-struct user_regs_struct {
-/* copied from user.h which doesn't define this in a struct */
-
-#define IA64_REG_COUNT (EF_SIZE/8+32) /* integer and fp regs */
-unsigned long regs[IA64_REG_COUNT]; /* integer and fp regs */
-};
-#endif
 
 #if defined(sparc) || defined(sparcv9)
 #define user_regs_struct pt_regs
diff -r 454d7cc622ab -r d05ff4bf41b3 agent/src/os/win32/windbg/sawindbg.cpp
--- a/agent/src/os/win32/windbg/sawindbg.cpp Wed Feb 06 15:22:32 2013 -0800
+++ b/agent/src/os/win32/windbg/sawindbg.cpp Thu Feb 07 12:23:51 2013 -0800
@@ -27,10 +27,7 @@
 
 #include "sun_jvm_hotspot_debugger_windbg_WindbgDebuggerLocal.h"
 
-#ifdef _M_IA64
-  #include "sun_jvm_hotspot_debugger_ia64_IA64ThreadContext.h"
-  #define NPRGREG sun_jvm_hotspot_debugger_ia64_IA64ThreadContext_NPRGREG
-#elif _M_IX86
+#ifdef _M_IX86
   #include "sun_jvm_hotspot_debugger_x86_X86ThreadContext.h"
   #define NPRGREG sun_jvm_hotspot_debugger_x86_X86ThreadContext_NPRGREG
 #elif _M_AMD64
@@ -491,92 +488,7 @@
   memset(&context, 0, sizeof(CONTEXT));
 
 #undef REG_INDEX
-#ifdef _M_IA64
-  #define REG_INDEX(x) sun_jvm_hotspot_debugger_ia64_IA64ThreadContext_##x
-
-  context.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG;
-  ptrIDebugAdvanced->GetThreadContext(&context, sizeof(CONTEXT));
-
-  ptrRegs[REG_INDEX(GR0)] = 0; // always 0
-  ptrRegs[REG_INDEX(GR1)] = context.IntGp; // r1
-  ptrRegs[REG_INDEX(GR2)] = context.IntT0; // r2-r3
-  ptrRegs[REG_INDEX(GR3)] = context.IntT1;
-  ptrRegs[REG_INDEX(GR4)] = context.IntS0; // r4-r7
-  ptrRegs[REG_INDEX(GR5)] = context.IntS1;
-  ptrRegs[REG_INDEX(GR6)] = context.IntS2;
-  ptrRegs[REG_INDEX(GR7)] = context.IntS3;
-  ptrRegs[REG_INDEX(GR8)] = context.IntV0; // r8
-  ptrRegs[REG_INDEX(GR9)] = context.IntT2; // r9-r11
-  ptrRegs[REG_INDEX(GR10)] = context.IntT3;
-  ptrRegs[REG_INDEX(GR11)] = context.IntT4;
-  ptrRegs[REG_INDEX(GR12)] = context.IntSp; // r12 stack pointer
-  ptrRegs[REG_INDEX(GR13)] = context.IntTeb; // r13 teb
-  ptrRegs[REG_INDEX(GR14)] = context.IntT5; // r14-r31
-  ptrRegs[REG_INDEX(GR15)] = context.IntT6;
-  ptrRegs[REG_INDEX(GR16)] = context.IntT7;
-  ptrRegs[REG_INDEX(GR17)] = context.IntT8;
-  ptrRegs[REG_INDEX(GR18)] = context.IntT9;
-  ptrRegs[REG_INDEX(GR19)] = context.IntT10;
-  ptrRegs[REG_INDEX(GR20)] = context.IntT11;
-  ptrRegs[REG_INDEX(GR21)] = context.IntT12;
-  ptrRegs[REG_INDEX(GR22)] = context.IntT13;
-  ptrRegs[REG_INDEX(GR23)] = context.IntT14;
-  ptrRegs[REG_INDEX(GR24)] = context.IntT15;
-  ptrRegs[REG_INDEX(GR25)] = context.IntT16;
-  ptrRegs[REG_INDEX(GR26)] = context.IntT17;
-  ptrRegs[REG_INDEX(GR27)] = context.IntT18;
-  ptrRegs[REG_INDEX(GR28)] = context.IntT19;
-  ptrRegs[REG_INDEX(GR29)] = context.IntT20;
-  ptrRegs[REG_INDEX(GR30)] = context.IntT21;
-  ptrRegs[REG_INDEX(GR31)] = context.IntT22;
-
-  ptrRegs[REG_INDEX(INT_NATS)] = context.IntNats;
-  ptrRegs[REG_INDEX(PREDS)] = context.Preds;
-
-  ptrRegs[REG_INDEX(BR_RP)] = context.BrRp;
-  ptrRegs[REG_INDEX(BR1)] = context.BrS0; // b1-b5
-  ptrRegs[REG_INDEX(BR2)] = context.BrS1;
-  ptrRegs[REG_INDEX(BR3)] = context.BrS2;
-  ptrRegs[REG_INDEX(BR4)] = context.BrS3;
-  ptrRegs[REG_INDEX(BR5)] = context.BrS4;
-  ptrRegs[REG_INDEX(BR6)] = context.BrT0; // b6-b7
-  ptrRegs[REG_INDEX(BR7)] = context.BrT1;
-
-  ptrRegs[REG_INDEX(AP_UNAT)] = context.ApUNAT;
-  ptrRegs[REG_INDEX(AP_LC)] = context.ApLC;
-  ptrRegs[REG_INDEX(AP_EC)] = context.ApEC;
-  ptrRegs[REG_INDEX(AP_CCV)] = context.ApCCV;
-  ptrRegs[REG_INDEX(AP_DCR)] = context.ApDCR;
-
-  ptrRegs[REG_INDEX(RS_PFS)] = context.RsPFS;
-  ptrRegs[REG_INDEX(RS_BSP)] = context.RsBSP;
-  ptrRegs[REG_INDEX(RS_BSPSTORE)] = context.RsBSPSTORE;
-  ptrRegs[REG_INDEX(RS_RSC)] = context.RsRSC;
-  ptrRegs[REG_INDEX(RS_RNAT)] = context.RsRNAT;
-
-  ptrRegs[REG_INDEX(ST_IPSR)] = context.StIPSR;
-  ptrRegs[REG_INDEX(ST_IIP)] = context.StIIP;
-  ptrRegs[REG_INDEX(ST_IFS)] = context.StIFS;
-
-  ptrRegs[REG_INDEX(DB_I0)] = context.DbI0;
-  ptrRegs[REG_INDEX(DB_I1)] = context.DbI1;
-  ptrRegs[REG_INDEX(DB_I2)] = context.DbI2;
-  ptrRegs[REG_INDEX(DB_I3)] = context.DbI3;
-  ptrRegs[REG_INDEX(DB_I4)] = context.DbI4;
-  ptrRegs[REG_INDEX(DB_I5)] = context.DbI5;
-  ptrRegs[REG_INDEX(DB_I6)] = context.DbI6;
-  ptrRegs[REG_INDEX(DB_I7)] = context.DbI7;
-
-  ptrRegs[REG_INDEX(DB_D0)] = context.DbD0;
-  ptrRegs[REG_INDEX(DB_D1)] = context.DbD1;
-  ptrRegs[REG_INDEX(DB_D2)] = context.DbD2;
-  ptrRegs[REG_INDEX(DB_D3)] = context.DbD3;
-  ptrRegs[REG_INDEX(DB_D4)] = context.DbD4;
-  ptrRegs[REG_INDEX(DB_D5)] = context.DbD5;
-  ptrRegs[REG_INDEX(DB_D6)] = context.DbD6;
-  ptrRegs[REG_INDEX(DB_D7)] = context.DbD7;
-
-#elif _M_IX86
+#ifdef _M_IX86
   #define REG_INDEX(x) sun_jvm_hotspot_debugger_x86_X86ThreadContext_##x
 
   context.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG_REGISTERS;
diff -r 454d7cc622ab -r d05ff4bf41b3 src/cpu/sparc/vm/c2_globals_sparc.hpp
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp Wed Feb 06 15:22:32 2013 -0800
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp Thu Feb 07 12:23:51 2013 -0800
@@ -42,7 +42,7 @@
 #else
 define_pd_global(bool, ProfileInterpreter, true);
 #endif // CC_INTERP
-define_pd_global(bool, TieredCompilation, trueInTiered);
+define_pd_global(bool, TieredCompilation, false);
 define_pd_global(intx, CompileThreshold, 10000);
 define_pd_global(intx, BackEdgeThreshold, 140000);
 
diff -r 454d7cc622ab -r d05ff4bf41b3 src/cpu/x86/vm/c2_globals_x86.hpp
--- a/src/cpu/x86/vm/c2_globals_x86.hpp Wed Feb 06 15:22:32 2013 -0800
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp Thu Feb 07 12:23:51 2013 -0800
@@ -44,7 +44,7 @@
 #else
 define_pd_global(bool, ProfileInterpreter, true);
 #endif // CC_INTERP
-define_pd_global(bool, TieredCompilation, trueInTiered);
+define_pd_global(bool, TieredCompilation, false);
 define_pd_global(intx, CompileThreshold, 10000);
 define_pd_global(intx, BackEdgeThreshold, 100000);
 
diff -r 454d7cc622ab -r d05ff4bf41b3 src/os/linux/vm/os_linux.cpp
--- a/src/os/linux/vm/os_linux.cpp Wed Feb 06 15:22:32 2013 -0800
+++ b/src/os/linux/vm/os_linux.cpp Thu Feb 07 12:23:51 2013 -0800
@@ -1155,13 +1155,9 @@
   // for initial thread if its stack size exceeds 6M. Cap it at 2M,
   // in case other parts in glibc still assumes 2M max stack size.
   // FIXME: alt signal stack is gone, maybe we can relax this constraint?
-#ifndef IA64
-  if (stack_size > 2 * K * K) stack_size = 2 * K * K;
-#else
-  // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small
-  if (stack_size > 4 * K * K) stack_size = 4 * K * K;
-#endif
-
+  if (stack_size > 2 * K * K IA64_ONLY(*2))
+      stack_size = 2 * K * K IA64_ONLY(*2);
   // Try to figure out where the stack base (top) is. This is harder.
   //
   // When an application is started, glibc saves the initial stack pointer in
@@ -4367,16 +4363,12 @@
   if (is_NPTL()) {
     return pthread_cond_timedwait(_cond, _mutex, _abstime);
   } else {
-#ifndef IA64
     // 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
     // word back to default 64bit precision if condvar is signaled. Java
     // wants 53bit precision. Save and restore current value.
     int fpu = get_fpu_control_word();
-#endif // IA64
     int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
-#ifndef IA64
     set_fpu_control_word(fpu);
-#endif // IA64
     return status;
   }
 }
diff -r 454d7cc622ab -r d05ff4bf41b3 src/os/windows/vm/os_windows.cpp
--- a/src/os/windows/vm/os_windows.cpp Wed Feb 06 15:22:32 2013 -0800
+++ b/src/os/windows/vm/os_windows.cpp Thu Feb 07 12:23:51 2013 -0800
@@ -349,6 +349,33 @@
 #ifdef _M_IA64
   // IA64 has memory and register stacks
+  //
+  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
+  // at thread creation (1MB backing store growing upwards, 1MB memory stack
+  // growing downwards, 2MB summed up)
+  //
+  // ...
+  // ------- top of stack (high address) -----
+  // |
+  // |      1MB
+  // |      Backing Store (Register Stack)
+  // |
+  // |      / \
+  // |       |
+  // |       |
+  // |       |
+  // ------------------------ stack base -----
+  // |      1MB
+  // |      Memory Stack
+  // |
+  // |       |
+  // |       |
+  // |       |
+  // |      \ /
+  // |
+  // ----- bottom of stack (low address) -----
+  // ...
+
   stack_size = stack_size / 2;
 #endif
   return stack_bottom + stack_size;
@@ -2005,17 +2032,34 @@
   JavaThread* thread = JavaThread::current();
   // Save pc in thread
 #ifdef _M_IA64
-  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->StIIP);
+  // Do not blow up if no thread info available.
+  if (thread) {
+    // Saving PRECISE pc (with slot information) in thread.
+    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
+    // Convert precise PC into "Unix" format
+    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
+    thread->set_saved_exception_pc((address)precise_pc);
+  }
   // Set pc to handler
   exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
+  // Clear out psr.ri (= Restart Instruction) in order to continue
+  // at the beginning of the target bundle.
+ exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF; + assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!"); #elif _M_AMD64 - thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Rip); + // Do not blow up if no thread info available. + if (thread) { + thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); + } // Set pc to handler exceptionInfo->ContextRecord->Rip = (DWORD64)handler; #else - thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Eip); + // Do not blow up if no thread info available. + if (thread) { + thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); + } // Set pc to handler - exceptionInfo->ContextRecord->Eip = (LONG)handler; + exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; #endif // Continue the execution @@ -2040,6 +2084,14 @@ // included or copied here. #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 +// Handle NAT Bit consumption on IA64. +#ifdef _M_IA64 +#define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION +#endif + +// Windows Vista/2008 heap corruption check +#define EXCEPTION_HEAP_CORRUPTION 0xC0000374 + #define def_excpt(val) #val, val struct siglabel { @@ -2082,6 +2134,10 @@ def_excpt(EXCEPTION_GUARD_PAGE), def_excpt(EXCEPTION_INVALID_HANDLE), def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), + def_excpt(EXCEPTION_HEAP_CORRUPTION), +#ifdef _M_IA64 + def_excpt(EXCEPTION_REG_NAT_CONSUMPTION), +#endif NULL, 0 }; @@ -2206,7 +2262,14 @@ if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; #ifdef _M_IA64 - address pc = (address) exceptionInfo->ContextRecord->StIIP; + // On Itanium, we need the "precise pc", which has the slot number coded + // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format). + address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress; + // Convert the pc to "Unix format", which has the slot number coded + // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2 + // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction" + // information is saved in the Unix format. + address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2)); #elif _M_AMD64 address pc = (address) exceptionInfo->ContextRecord->Rip; #else @@ -2321,29 +2384,40 @@ if (exception_code == EXCEPTION_STACK_OVERFLOW) { if (os::uses_stack_guard_pages()) { #ifdef _M_IA64 - // - // If it's a legal stack address continue, Windows will map it in. - // + // Use guard page for register stack. PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; address addr = (address) exceptionRecord->ExceptionInformation[1]; - if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) - return EXCEPTION_CONTINUE_EXECUTION; - - // The register save area is the same size as the memory stack - // and starts at the page just above the start of the memory stack. - // If we get a fault in this area, we've run out of register - // stack. If we are in java, try throwing a stack overflow exception. - if (addr > thread->stack_base() && - addr <= (thread->stack_base()+thread->stack_size()) ) { - char buf[256]; - jio_snprintf(buf, sizeof(buf), - "Register stack overflow, addr:%p, stack_base:%p\n", - addr, thread->stack_base() ); - tty->print_raw_cr(buf); - // If not in java code, return and hope for the best. - return in_java ? 
Handle_Exception(exceptionInfo, - SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) - : EXCEPTION_CONTINUE_EXECUTION; + // Check for a register stack overflow on Itanium + if (thread->addr_inside_register_stack_red_zone(addr)) { + // Fatal red zone violation happens if the Java program + // catches a StackOverflow error and does so much processing + // that it runs beyond the unprotected yellow guard zone. As + // a result, we are out of here. + fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit."); + } else if(thread->addr_inside_register_stack(addr)) { + // Disable the yellow zone which sets the state that + // we've got a stack overflow problem. + if (thread->stack_yellow_zone_enabled()) { + thread->disable_stack_yellow_zone(); + } + // Give us some room to process the exception. + thread->disable_register_stack_guard(); + // Tracing with +Verbose. + if (Verbose) { + tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc); + tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr); + tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base()); + tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]", + thread->register_stack_base(), + thread->register_stack_base() + thread->stack_size()); + } + + // Reguard the permanent register stack red zone just to be sure. + // We saw Windows silently disabling this without telling us. + thread->enable_register_stack_red_zone(); + + return Handle_Exception(exceptionInfo, + SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); } #endif if (thread->stack_yellow_zone_enabled()) { @@ -2418,50 +2492,33 @@ { // Null pointer exception. #ifdef _M_IA64 - // We catch register stack overflows in compiled code by doing - // an explicit compare and executing a st8(G0, G0) if the - // BSP enters into our guard area. We test for the overflow - // condition and fall into the normal null pointer exception - // code if BSP hasn't overflowed. - if ( in_java ) { - if(thread->register_stack_overflow()) { - assert((address)exceptionInfo->ContextRecord->IntS3 == - thread->register_stack_limit(), - "GR7 doesn't contain register_stack_limit"); - // Disable the yellow zone which sets the state that - // we've got a stack overflow problem. - if (thread->stack_yellow_zone_enabled()) { - thread->disable_stack_yellow_zone(); + // Process implicit null checks in compiled code. Note: Implicit null checks + // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs. + if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) { + CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format); + // Handle implicit null check in UEP method entry + if (cb && (cb->is_frame_complete_at(pc) || + (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) { + if (Verbose) { + intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0); + tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format); + tty->print_cr(" to addr " INTPTR_FORMAT, addr); + tty->print_cr(" bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)", + *(bundle_start + 1), *bundle_start); } - // Give us some room to process the exception - thread->disable_register_stack_guard(); - // Update GR7 with the new limit so we can continue running - // compiled code. 
- exceptionInfo->ContextRecord->IntS3 = - (ULONGLONG)thread->register_stack_limit(); return Handle_Exception(exceptionInfo, - SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); - } else { - // - // Check for implicit null - // We only expect null pointers in the stubs (vtable) - // the rest are checked explicitly now. - // - if (((uintptr_t)addr) < os::vm_page_size() ) { - // an access to the first page of VM--assume it is a null pointer - address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); - if (stub != NULL) return Handle_Exception(exceptionInfo, stub); - } + SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL)); } - } // in_java - - // IA64 doesn't use implicit null checking yet. So we shouldn't - // get here. - tty->print_raw_cr("Access violation, possible null pointer exception"); + } + + // Implicit null checks were processed above. Hence, we should not reach + // here in the usual case => die! + if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception"); report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, exceptionInfo->ContextRecord); return EXCEPTION_CONTINUE_SEARCH; -#else /* !IA64 */ + +#else // !IA64 // Windows 98 reports faulting addresses incorrectly if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) || @@ -2493,7 +2550,24 @@ report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, exceptionInfo->ContextRecord); return EXCEPTION_CONTINUE_SEARCH; - } + } // /EXCEPTION_ACCESS_VIOLATION + // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +#if defined _M_IA64 + else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION || + exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) { + M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0); + + // Compiled method patched to be non entrant? Following conditions must apply: + // 1. must be first instruction in bundle + // 2. 
must be a break instruction with appropriate code + if((((uint64_t) pc & 0x0F) == 0) && + (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) { + return Handle_Exception(exceptionInfo, + (address)SharedRuntime::get_handle_wrong_method_stub()); + } + } // /EXCEPTION_ILLEGAL_INSTRUCTION +#endif + if (in_java) { switch (exception_code) { diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/adlc/adlparse.cpp --- a/src/share/vm/adlc/adlparse.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/adlc/adlparse.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -168,7 +168,7 @@ // Check for block delimiter if ( (_curchar != '%') || ( next_char(), (_curchar != '{')) ) { - parse_err(SYNERR, "missing '%{' in instruction definition\n"); + parse_err(SYNERR, "missing '%%{' in instruction definition\n"); return; } next_char(); // Maintain the invariant @@ -253,7 +253,7 @@ } while(_curchar != '%'); next_char(); if (_curchar != '}') { - parse_err(SYNERR, "missing '%}' in instruction definition\n"); + parse_err(SYNERR, "missing '%%}' in instruction definition\n"); return; } // Check for "Set" form of chain rule @@ -423,7 +423,7 @@ skipws(); // Check for block delimiter if ((_curchar != '%') || (*(_ptr+1) != '{')) { // If not open block - parse_err(SYNERR, "missing '%c{' in operand definition\n","%"); + parse_err(SYNERR, "missing '%%{' in operand definition\n"); return; } next_char(); next_char(); // Skip over "%{" symbol @@ -483,7 +483,7 @@ } while(_curchar != '%'); next_char(); if (_curchar != '}') { - parse_err(SYNERR, "missing '%}' in operand definition\n"); + parse_err(SYNERR, "missing '%%}' in operand definition\n"); return; } // Add operand to tail of operand list @@ -1324,7 +1324,7 @@ // Check for block delimiter if ( (_curchar != '%') || ( next_char(), (_curchar != '{')) ) { - parse_err(SYNERR, "missing '%{' in pipeline definition\n"); + parse_err(SYNERR, "missing '%%{' in pipeline definition\n"); return; } next_char(); // Maintain the invariant @@ -1341,7 +1341,7 @@ skipws(); if ( (_curchar != '%') || ( next_char(), (_curchar != '{')) ) { - parse_err(SYNERR, "expected '%{'\n"); + parse_err(SYNERR, "expected '%%{'\n"); return; } next_char(); skipws(); @@ -1397,7 +1397,7 @@ skipws(); if ( (_curchar != '%') || ( next_char(), (_curchar != '{')) ) { - parse_err(SYNERR, "expected '%{'\n"); + parse_err(SYNERR, "expected '%%{'\n"); return; } next_char(); skipws(); @@ -1586,7 +1586,7 @@ if ( (_curchar != '%') || ( next_char(), (_curchar != '}')) ) { - parse_err(SYNERR, "expected '%}', found \"%c\"\n", _curchar); + parse_err(SYNERR, "expected '%%}', found \"%c\"\n", _curchar); } next_char(); skipws(); @@ -1612,7 +1612,7 @@ next_char(); if (_curchar != '}') { - parse_err(SYNERR, "missing \"%}\" in pipeline definition\n"); + parse_err(SYNERR, "missing \"%%}\" in pipeline definition\n"); return; } @@ -1775,7 +1775,7 @@ // Check for block delimiter if ( (_curchar != '%') || ( next_char(), (_curchar != '{')) ) { - parse_err(SYNERR, "missing \"%{\" in pipe_class definition\n"); + parse_err(SYNERR, "missing \"%%{\" in pipe_class definition\n"); return; } next_char(); @@ -2062,7 +2062,7 @@ next_char(); if (_curchar != '}') { - parse_err(SYNERR, "missing \"%}\" in pipe_class definition\n"); + parse_err(SYNERR, "missing \"%%}\" in pipe_class definition\n"); return; } @@ -3341,12 +3341,12 @@ char *disp = NULL; if (_curchar != '%') { - parse_err(SYNERR, "Missing '%{' for 'interface' block.\n"); + parse_err(SYNERR, "Missing '%%{' for 'interface' block.\n"); return NULL; } next_char(); // Skip '%' if (_curchar != '{') 
{ - parse_err(SYNERR, "Missing '%{' for 'interface' block.\n"); + parse_err(SYNERR, "Missing '%%{' for 'interface' block.\n"); return NULL; } next_char(); // Skip '{' @@ -3354,7 +3354,7 @@ do { char *field = get_ident(); if (field == NULL) { - parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%}' ending interface.\n"); + parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%%}' ending interface.\n"); return NULL; } if ( strcmp(field,"base") == 0 ) { @@ -3370,13 +3370,13 @@ disp = interface_field_parse(); } else { - parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%}' ending interface.\n"); + parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%%}' ending interface.\n"); return NULL; } } while( _curchar != '%' ); next_char(); // Skip '%' if ( _curchar != '}' ) { - parse_err(SYNERR, "Missing '%}' for 'interface' block.\n"); + parse_err(SYNERR, "Missing '%%}' for 'interface' block.\n"); return NULL; } next_char(); // Skip '}' @@ -3403,12 +3403,12 @@ const char *greater_format = "gt"; if (_curchar != '%') { - parse_err(SYNERR, "Missing '%{' for 'cond_interface' block.\n"); + parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n"); return NULL; } next_char(); // Skip '%' if (_curchar != '{') { - parse_err(SYNERR, "Missing '%{' for 'cond_interface' block.\n"); + parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n"); return NULL; } next_char(); // Skip '{' @@ -3416,7 +3416,7 @@ do { char *field = get_ident(); if (field == NULL) { - parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%}' ending interface.\n"); + parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%%}' ending interface.\n"); return NULL; } if ( strcmp(field,"equal") == 0 ) { @@ -3438,13 +3438,13 @@ greater = interface_field_parse(&greater_format); } else { - parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%}' ending interface.\n"); + parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%%}' ending interface.\n"); return NULL; } } while( _curchar != '%' ); next_char(); // Skip '%' if ( _curchar != '}' ) { - parse_err(SYNERR, "Missing '%}' for 'interface' block.\n"); + parse_err(SYNERR, "Missing '%%}' for 'interface' block.\n"); return NULL; } next_char(); // Skip '}' @@ -3543,7 +3543,7 @@ } else if ((cnstr = find_cpp_block("match constructor")) == NULL ) { parse_err(SYNERR, "invalid construction of match rule\n" - "Missing ';' or invalid '%{' and '%}' constructor\n"); + "Missing ';' or invalid '%%{' and '%%}' constructor\n"); return NULL; // No MatchRule to return } if (_AD._adl_debug > 1) @@ -3646,7 +3646,7 @@ // Check for closing '"' and '%}' in format description skipws(); // Move to closing '%}' if ( _curchar != '%' ) { - parse_err(SYNERR, "non-blank characters between closing '\"' and '%' in format"); + parse_err(SYNERR, "non-blank characters between closing '\"' and '%%' in format"); return NULL; } } // Done with format description inside @@ -3654,7 +3654,7 @@ skipws(); // Past format description, at '%' if ( _curchar != '%' || *(_ptr+1) != '}' ) { - parse_err(SYNERR, "missing '%}' at end of format block"); + parse_err(SYNERR, "missing '%%}' at end of format block"); return NULL; } next_char(); // Move past the '%' @@ -3785,7 +3785,7 @@ skipws(); // Past format description, at '%' if ( _curchar != '%' || *(_ptr+1) != '}' ) { - parse_err(SYNERR, "missing '%}' at end of format block"); + parse_err(SYNERR, "missing '%%}' at end of format block"); return NULL; } next_char(); // Move past the '%' @@ -3834,7 
+3834,7 @@ skipws(); // Skip leading whitespace if ((_curchar != '%') || (next_char(), (_curchar != '{')) ) { // If not open block - parse_err(SYNERR, "missing '%{' in expand definition\n"); + parse_err(SYNERR, "missing '%%{' in expand definition\n"); return(NULL); } next_char(); // Maintain the invariant @@ -3933,7 +3933,7 @@ } while(_curchar != '%'); next_char(); if (_curchar != '}') { - parse_err(SYNERR, "missing '%}' in expand rule definition\n"); + parse_err(SYNERR, "missing '%%}' in expand rule definition\n"); return(NULL); } next_char(); diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/c1/c1_GraphBuilder.cpp --- a/src/share/vm/c1/c1_GraphBuilder.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -3667,11 +3667,12 @@ } // now perform tests that are based on flag settings - if (callee->force_inline() || callee->should_inline()) { - // ignore heuristic controls on inlining - if (callee->force_inline()) - print_inlining(callee, "force inline by annotation"); + if (callee->force_inline()) { + print_inlining(callee, "force inline by annotation"); + } else if (callee->should_inline()) { + print_inlining(callee, "force inline by CompileOracle"); } else { + // use heuristic controls on inlining if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("inlining too deep"); if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep"); if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large"); diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/c1/c1_Instruction.cpp --- a/src/share/vm/c1/c1_Instruction.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/c1/c1_Instruction.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -188,7 +188,7 @@ ciType* LoadIndexed::declared_type() const { ciType* array_type = array()->declared_type(); - if (array_type == NULL) { + if (array_type == NULL || !array_type->is_loaded()) { return NULL; } assert(array_type->is_array_klass(), "what else?"); diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/ci/ciEnv.cpp --- a/src/share/vm/ci/ciEnv.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/ci/ciEnv.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -1168,7 +1168,7 @@ void ciEnv::dump_replay_data(outputStream* out) { ASSERT_IN_VM; - + ResourceMark rm; #if INCLUDE_JVMTI out->print_cr("JvmtiExport can_access_local_variables %d", _jvmti_can_access_local_variables); out->print_cr("JvmtiExport can_hotswap_or_post_breakpoint %d", _jvmti_can_hotswap_or_post_breakpoint); diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/ci/ciInstanceKlass.cpp --- a/src/share/vm/ci/ciInstanceKlass.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/ci/ciInstanceKlass.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -580,6 +580,7 @@ } void do_field(fieldDescriptor* fd) { if (fd->is_final() && !fd->has_initial_value()) { + ResourceMark rm; oop mirror = fd->field_holder()->java_mirror(); _out->print("staticfield %s %s %s ", _holder, fd->name()->as_quoted_ascii(), fd->signature()->as_quoted_ascii()); switch (fd->field_type()) { @@ -643,6 +644,8 @@ void ciInstanceKlass::dump_replay_data(outputStream* out) { ASSERT_IN_VM; + ResourceMark rm; + InstanceKlass* ik = get_instanceKlass(); ConstantPool* cp = ik->constants(); diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/ci/ciMethod.cpp --- a/src/share/vm/ci/ciMethod.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/ci/ciMethod.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -977,7 +977,7 @@ // ciMethod::set_not_compilable // // Tell the VM that this 
method cannot be compiled at all. -void ciMethod::set_not_compilable() { +void ciMethod::set_not_compilable(const char* reason) { check_is_loaded(); VM_ENTRY_MARK; ciEnv* env = CURRENT_ENV; @@ -986,7 +986,7 @@ } else { _is_c2_compilable = false; } - get_Method()->set_not_compilable(env->comp_level()); + get_Method()->set_not_compilable(env->comp_level(), true, reason); } // ------------------------------------------------------------------ @@ -1178,6 +1178,7 @@ void ciMethod::dump_replay_data(outputStream* st) { ASSERT_IN_VM; + ResourceMark rm; Method* method = get_Method(); Klass* holder = method->method_holder(); st->print_cr("ciMethod %s %s %s %d %d %d %d %d", diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/ci/ciMethod.hpp --- a/src/share/vm/ci/ciMethod.hpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/ci/ciMethod.hpp Thu Feb 07 12:23:51 2013 -0800 @@ -252,7 +252,7 @@ bool has_option(const char *option); bool can_be_compiled(); bool can_be_osr_compiled(int entry_bci); - void set_not_compilable(); + void set_not_compilable(const char* reason = NULL); bool has_compiled_code(); void log_nmethod_identity(xmlStream* log); bool is_not_reached(int bci); diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/ci/ciMethodData.cpp --- a/src/share/vm/ci/ciMethodData.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/ci/ciMethodData.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -374,6 +374,7 @@ void ciMethodData::dump_replay_data(outputStream* out) { ASSERT_IN_VM; + ResourceMark rm; MethodData* mdo = get_MethodData(); Method* method = mdo->method(); Klass* holder = method->method_holder(); diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/compiler/compileBroker.cpp --- a/src/share/vm/compiler/compileBroker.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/compiler/compileBroker.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -1398,7 +1398,7 @@ method->print_short_name(tty); tty->cr(); } - method->set_not_compilable_quietly(); + method->set_not_compilable(CompLevel_all, !quietly, "excluded by CompilerOracle"); } return false; diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/interpreter/bytecodeInterpreter.cpp --- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -3099,9 +3099,9 @@ tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult); tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult); #endif -#if defined(IA64) && !defined(ZERO) +#if !defined(ZERO) tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp); -#endif // IA64 && !ZERO +#endif // !ZERO tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link); } diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/oops/method.cpp --- a/src/share/vm/oops/method.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/oops/method.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -700,7 +700,7 @@ } -void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report) { +void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason) { if (PrintCompilation && report) { ttyLocker ttyl; tty->print("made not %scompilable on ", is_osr ? 
"OSR " : ""); @@ -714,14 +714,21 @@ } this->print_short_name(tty); int size = this->code_size(); - if (size > 0) + if (size > 0) { tty->print(" (%d bytes)", size); + } + if (reason != NULL) { + tty->print(" %s", reason); + } tty->cr(); } if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) { ttyLocker ttyl; xtty->begin_elem("make_not_%scompilable thread='" UINTX_FORMAT "'", is_osr ? "osr_" : "", os::current_thread_id()); + if (reason != NULL) { + xtty->print(" reason=\'%s\'", reason); + } xtty->method(this); xtty->stamp(); xtty->end_elem(); @@ -743,8 +750,8 @@ } // call this when compiler finds that this method is not compilable -void Method::set_not_compilable(int comp_level, bool report) { - print_made_not_compilable(comp_level, /*is_osr*/ false, report); +void Method::set_not_compilable(int comp_level, bool report, const char* reason) { + print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason); if (comp_level == CompLevel_all) { set_not_c1_compilable(); set_not_c2_compilable(); @@ -769,8 +776,8 @@ return false; } -void Method::set_not_osr_compilable(int comp_level, bool report) { - print_made_not_compilable(comp_level, /*is_osr*/ true, report); +void Method::set_not_osr_compilable(int comp_level, bool report, const char* reason) { + print_made_not_compilable(comp_level, /*is_osr*/ true, report, reason); if (comp_level == CompLevel_all) { set_not_c1_osr_compilable(); set_not_c2_osr_compilable(); diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/oops/method.hpp --- a/src/share/vm/oops/method.hpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/oops/method.hpp Thu Feb 07 12:23:51 2013 -0800 @@ -764,18 +764,18 @@ // whether it is not compilable for another reason like having a // breakpoint set in it. bool is_not_compilable(int comp_level = CompLevel_any) const; - void set_not_compilable(int comp_level = CompLevel_all, bool report = true); + void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL); void set_not_compilable_quietly(int comp_level = CompLevel_all) { set_not_compilable(comp_level, false); } bool is_not_osr_compilable(int comp_level = CompLevel_any) const; - void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true); + void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL); void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) { set_not_osr_compilable(comp_level, false); } private: - void print_made_not_compilable(int comp_level, bool is_osr, bool report); + void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason); public: bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); } diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/oops/methodData.hpp --- a/src/share/vm/oops/methodData.hpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/oops/methodData.hpp Thu Feb 07 12:23:51 2013 -0800 @@ -1469,7 +1469,7 @@ void inc_decompile_count() { _nof_decompiles += 1; if (decompile_count() > (uint)PerMethodRecompilationCutoff) { - method()->set_not_compilable(CompLevel_full_optimization); + method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff"); } } diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/oops/symbol.cpp --- a/src/share/vm/oops/symbol.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/oops/symbol.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -152,6 +152,7 @@ } void Symbol::print_symbol_on(outputStream* st) 
const { + ResourceMark rm; st = st ? st : tty; st->print("%s", as_quoted_ascii()); } diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/opto/bytecodeInfo.cpp --- a/src/share/vm/opto/bytecodeInfo.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/opto/bytecodeInfo.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -420,14 +420,24 @@ } //------------------------------print_inlining--------------------------------- -// Really, the failure_msg can be a success message also. -void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const { - C->print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline"); - if (callee_method == NULL) tty->print(" callee not monotonic or profiled"); - if (Verbose && callee_method) { - const InlineTree *top = this; - while( top->caller_tree() != NULL ) { top = top->caller_tree(); } - //tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); +void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, + const char* msg, bool success) const { + assert(msg != NULL, "just checking"); + if (C->log() != NULL) { + if (success) { + C->log()->inline_success(msg); + } else { + C->log()->inline_fail(msg); + } + } + if (PrintInlining) { + C->print_inlining(callee_method, inline_level(), caller_bci, msg); + if (callee_method == NULL) tty->print(" callee not monotonic or profiled"); + if (Verbose && callee_method) { + const InlineTree *top = this; + while( top->caller_tree() != NULL ) { top = top->caller_tree(); } + //tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); + } } } @@ -451,23 +461,23 @@ // Do some initial checks. if (!pass_initial_checks(caller_method, caller_bci, callee_method)) { - if (PrintInlining) print_inlining(callee_method, caller_bci, "failed initial checks"); + print_inlining(callee_method, caller_bci, "failed initial checks", + false /* !success */); return NULL; } // Do some parse checks. failure_msg = check_can_parse(callee_method); if (failure_msg != NULL) { - if (PrintInlining) print_inlining(callee_method, caller_bci, failure_msg); + print_inlining(callee_method, caller_bci, failure_msg, + false /* !success */); return NULL; } // Check if inlining policy says no. WarmCallInfo wci = *(initial_wci); - failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci, should_delay); - if (failure_msg != NULL && C->log() != NULL) { - C->log()->inline_fail(failure_msg); - } + failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, + &wci, should_delay); #ifndef PRODUCT if (UseOldInlining && InlineWarmCalls @@ -487,7 +497,7 @@ wci = *(WarmCallInfo::always_hot()); else wci = *(WarmCallInfo::always_cold()); - } + } if (!InlineWarmCalls) { if (!wci.is_cold() && !wci.is_hot()) { // Do not inline the warm calls. @@ -496,11 +506,10 @@ } if (!wci.is_cold()) { - // In -UseOldInlining, the failure_msg may also be a success message. - if (failure_msg == NULL) failure_msg = "inline (hot)"; - // Inline! - if (PrintInlining) print_inlining(callee_method, caller_bci, failure_msg); + print_inlining(callee_method, caller_bci, + failure_msg ? 
failure_msg : "inline (hot)", + true /* success */); if (UseOldInlining) build_inline_tree_for_callee(callee_method, jvms, caller_bci); if (InlineWarmCalls && !wci.is_hot()) @@ -509,8 +518,9 @@ } // Do not inline - if (failure_msg == NULL) failure_msg = "too cold to inline"; - if (PrintInlining) print_inlining(callee_method, caller_bci, failure_msg); + print_inlining(callee_method, caller_bci, + failure_msg ? failure_msg : "too cold to inline", + false /* !success */ ); return NULL; } diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/opto/callGenerator.cpp --- a/src/share/vm/opto/callGenerator.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/opto/callGenerator.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -305,11 +305,13 @@ void LateInlineCallGenerator::do_late_inline() { // Can't inline it if (call_node() == NULL || call_node()->outcnt() == 0 || - call_node()->in(0) == NULL || call_node()->in(0)->is_top()) + call_node()->in(0) == NULL || call_node()->in(0)->is_top()) { return; + } + const TypeTuple *r = call_node()->tf()->domain(); for (int i1 = 0; i1 < method()->arg_size(); i1++) { - if (call_node()->in(TypeFunc::Parms + i1)->is_top()) { + if (call_node()->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) { assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing"); return; } diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/opto/generateOptoStub.cpp --- a/src/share/vm/opto/generateOptoStub.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/opto/generateOptoStub.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -88,12 +88,12 @@ thread, in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::last_Java_pc_offset())); -#if defined(SPARC) || defined(IA64) +#if defined(SPARC) Node* adr_flags = basic_plus_adr(top(), thread, in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset())); -#endif /* defined(SPARC) || defined(IA64) */ +#endif /* defined(SPARC) */ // Drop in the last_Java_sp. last_Java_fp is not touched. @@ -102,10 +102,8 @@ // users will look at the other fields. // Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset())); -#ifndef IA64 Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS); store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias); -#endif // Set _thread_in_native // The order of stores into TLS is critical! 
Setting _thread_in_native MUST @@ -210,19 +208,12 @@ //----------------------------- // Clear last_Java_sp -#ifdef IA64 - if( os::is_MP() ) insert_mem_bar(Op_MemBarRelease); -#endif - store_to_memory(NULL, adr_sp, null(), T_ADDRESS, NoAlias); -#ifdef IA64 - if (os::is_MP() && UseMembar) insert_mem_bar(new MemBarVolatileNode()); -#endif // def IA64 // Clear last_Java_pc and (optionally)_flags store_to_memory(NULL, adr_last_Java_pc, null(), T_ADDRESS, NoAlias); -#if defined(SPARC) || defined(IA64) +#if defined(SPARC) store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias); -#endif /* defined(SPARC) || defined(IA64) */ +#endif /* defined(SPARC) */ #ifdef IA64 Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset())); if( os::is_MP() ) insert_mem_bar(Op_MemBarRelease); diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/opto/parse.hpp --- a/src/share/vm/opto/parse.hpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/opto/parse.hpp Thu Feb 07 12:23:51 2013 -0800 @@ -73,7 +73,8 @@ const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result, bool& should_delay); const char* should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const; const char* should_not_inline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const; - void print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const; + void print_inlining(ciMethod* callee_method, int caller_bci, + const char* msg, bool success) const; InlineTree *caller_tree() const { return _caller_tree; } InlineTree* callee_at(int bci, ciMethod* m) const; diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/opto/parse3.cpp --- a/src/share/vm/opto/parse3.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/opto/parse3.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -487,7 +487,8 @@ fun, NULL, TypeRawPtr::BOTTOM, makecon(TypeKlassPtr::make(array_klass)), length[0], length[1], length[2], - length[3], length[4]); + (ndimensions > 2) ? length[3] : NULL, + (ndimensions > 3) ? length[4] : NULL); } else { // Create a java array for dimension sizes Node* dims = NULL; diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/runtime/deoptimization.cpp --- a/src/share/vm/runtime/deoptimization.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/runtime/deoptimization.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -1559,7 +1559,7 @@ if (trap_method() == nm->method()) { make_not_compilable = true; } else { - trap_method->set_not_compilable(CompLevel_full_optimization); + trap_method->set_not_compilable(CompLevel_full_optimization, true, "overflow_recompile_count > PerBytecodeRecompilationCutoff"); // But give grace to the enclosing nm->method(). } } diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/runtime/os.cpp --- a/src/share/vm/runtime/os.cpp Wed Feb 06 15:22:32 2013 -0800 +++ b/src/share/vm/runtime/os.cpp Thu Feb 07 12:23:51 2013 -0800 @@ -985,15 +985,28 @@ // if C stack is walkable beyond current frame. The check for fp() is not // necessary on Sparc, but it's harmless. 
 bool os::is_first_C_frame(frame* fr) {
-#ifdef IA64
-  // In order to walk native frames on Itanium, we need to access the unwind
-  // table, which is inside ELF. We don't want to parse ELF after fatal error,
-  // so return true for IA64. If we need to support C stack walking on IA64,
-  // this function needs to be moved to CPU specific files, as fp() on IA64
-  // is register stack, which grows towards higher memory address.
+#if defined(IA64) && !defined(_WIN32)
+  // On IA64 we have to check if the callers bsp is still valid
+  // (i.e. within the register stack bounds).
+  // Notice: this only works for threads created by the VM and only if
+  // we walk the current stack!!! If we want to be able to walk
+  // arbitrary other threads, we'll have to somehow store the thread
+  // object in the frame.
+  Thread *thread = Thread::current();
+  if ((address)fr->fp() <=
+      thread->register_stack_base() HPUX_ONLY(+ 0x0) LINUX_ONLY(+ 0x50)) {
+    // This check is a little hacky, because on Linux the first C
+    // frame's ('start_thread') register stack frame starts at
+    // "register_stack_base + 0x48" while on HPUX, the first C frame's
+    // ('__pthread_bound_body') register stack frame seems to really
+    // start at "register_stack_base".
+    return true;
+  } else {
+    return false;
+  }
+#elif defined(IA64) && defined(_WIN32)
   return true;
-#endif
-
+#else
   // Load up sp, fp, sender sp and sender fp, check for reasonable values.
   // Check usp first, because if that's bad the other accessors may fault
   // on some architectures. Ditto ufp second, etc.
@@ -1023,6 +1036,7 @@
   if (old_fp - ufp > 64 * K) return true;
 
   return false;
+#endif
 }
 
 #ifdef ASSERT
diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/runtime/sharedRuntime.cpp
--- a/src/share/vm/runtime/sharedRuntime.cpp Wed Feb 06 15:22:32 2013 -0800
+++ b/src/share/vm/runtime/sharedRuntime.cpp Thu Feb 07 12:23:51 2013 -0800
@@ -2816,10 +2816,6 @@
 
 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
 
-#ifdef IA64
-  ShouldNotReachHere(); // NYI
-#endif /* IA64 */
-
   //
   // This code is dependent on the memory layout of the interpreter local
   // array and the monitors. On all of our platforms the layout is identical
diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/runtime/synchronizer.cpp
--- a/src/share/vm/runtime/synchronizer.cpp Wed Feb 06 15:22:32 2013 -0800
+++ b/src/share/vm/runtime/synchronizer.cpp Thu Feb 07 12:23:51 2013 -0800
@@ -53,7 +53,7 @@
 # include "os_bsd.inline.hpp"
 #endif
 
-#if defined(__GNUC__) && !defined(IA64)
+#if defined(__GNUC__)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define ATTR __attribute__((noinline))
 #else
diff -r 454d7cc622ab -r d05ff4bf41b3 src/share/vm/runtime/vframeArray.cpp
--- a/src/share/vm/runtime/vframeArray.cpp Wed Feb 06 15:22:32 2013 -0800
+++ b/src/share/vm/runtime/vframeArray.cpp Thu Feb 07 12:23:51 2013 -0800
@@ -233,8 +233,6 @@
       // Force early return from top frame after deoptimization
 #ifndef CC_INTERP
       pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
-#else
-      // TBD: Need to implement ForceEarlyReturn for CC_INTERP (ia64)
 #endif
     } else {
       // Possibly override the previous pc computation of the top (youngest) frame
diff -r 454d7cc622ab -r d05ff4bf41b3 test/compiler/8004741/Test8004741.java
--- a/test/compiler/8004741/Test8004741.java Wed Feb 06 15:22:32 2013 -0800
+++ b/test/compiler/8004741/Test8004741.java Thu Feb 07 12:23:51 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,70 +25,160 @@
  * @test Test8004741.java
  * @bug 8004741
  * @summary Missing compiled exception handle table entry for multidimensional array allocation
+ * @run main/othervm -Xmx64m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers -XX:+SafepointALot -XX:GuaranteedSafepointInterval=100 Test8004741
  * @run main/othervm -Xmx64m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers Test8004741
- *
  */

 import java.util.*;

 public class Test8004741 extends Thread {
+  static int passed = 0;
+
+  /**
+   * Loop forever allocating 2-d arrays.
+   * Catches and rethrows all exceptions; in the case of ThreadDeath, increments passed.
+   * Note that passed is incremented here because this is the exception handler with
+   * the smallest scope; we only want to declare success in the case where it is highly
+   * likely that the test condition
+   * (exception in 2-d array alloc interrupted by ThreadDeath)
+   * actually occurs.
+   */
   static int[][] test(int a, int b) throws Exception {
-    int[][] ar = null;
+    int[][] ar;
     try {
       ar = new int[a][b];
-    } catch (Error e) {
-      System.out.println("test got Error");
-      passed = true;
-      throw(e);
-    } catch (Exception e) {
-      System.out.println("test got Exception");
+    } catch (ThreadDeath e) {
+      System.out.println("test got ThreadDeath");
+      passed++;
       throw(e);
     }
     return ar;
   }
 
-  static boolean passed = false;
+  /* Cookbook wait-notify to track progress of test thread. */
+  Object progressLock = new Object();
+  private static final int NOT_STARTED = 0;
+  private static final int RUNNING = 1;
+  private static final int STOPPING = 2;
+
+  int progressState = NOT_STARTED;
 
-  public void run() {
-    System.out.println("test started");
-    try {
-      while(true) {
-        test(2,20000);
+  void toState(int state) {
+    synchronized (progressLock) {
+      progressState = state;
+      progressLock.notify();
+    }
+  }
+
+  void waitFor(int state) {
+    synchronized (progressLock) {
+      while (progressState < state) {
+        try {
+          progressLock.wait();
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+          System.out.println("unexpected InterruptedException");
+          fail();
        }
-    } catch (ThreadDeath e) {
-      System.out.println("test got ThreadDeath");
-      passed = true;
-    } catch (Error e) {
-      e.printStackTrace();
-      System.out.println("test got Error");
-    } catch (Exception e) {
-      e.printStackTrace();
-      System.out.println("test got Exception");
+      }
+      if (progressState > state) {
+        System.out.println("unexpected test state change, expected " +
+                           state + " but saw " + progressState);
+        fail();
+      }
+    }
+  }
+
+  /**
+   * Loops running test until some sort of an exception or error,
+   * expects to see ThreadDeath.
+   */
+  public void run() {
+    try {
+      // Print before state change, so that other thread is most likely
+      // to see this thread executing calls to test() in a loop.
+      System.out.println("thread running");
+      toState(RUNNING);
+      while (true) {
+        // (2,2) (2,10) (2,100) were observed to tickle the bug;
+        test(2, 100);
      }
+    } catch (ThreadDeath e) {
+      // nothing to say, passing was incremented by the test.
+    } catch (Throwable e) {
+      e.printStackTrace();
+      System.out.println("unexpected Throwable " + e);
+      fail();
+    }
+    toState(STOPPING);
+  }
+
+  /**
+   * Runs a single trial of the test in a thread.
+   * No single trial is definitive, since the ThreadDeath
+   * exception might not land in the tested region of code.
+   */
+  public static void threadTest() throws InterruptedException {
+    Test8004741 t = new Test8004741();
+    t.start();
+    t.waitFor(RUNNING);
+    Thread.sleep(100);
+    System.out.println("stopping thread");
+    t.stop();
+    t.waitFor(STOPPING);
+    t.join();
   }
 
   public static void main(String[] args) throws Exception {
+    // Warm up "test"
+    // t will never be started.
     for (int n = 0; n < 11000; n++) {
-      test(2, 20);
+      test(2, 100);
+    }
+
+    // Will this sleep help ensure that the compiler is run?
+    Thread.sleep(500);
+    passed = 0;
+
+    try {
+      test(-1, 100);
+      System.out.println("Missing NegativeArraySizeException #1");
+      fail();
+    } catch ( java.lang.NegativeArraySizeException e ) {
+      System.out.println("Saw expected NegativeArraySizeException #1");
     }
 
-    // First test exception catch
-    Test8004741 t = new Test8004741();
+    try {
+      test(100, -1);
+      fail();
+      System.out.println("Missing NegativeArraySizeException #2");
+      fail();
+    } catch ( java.lang.NegativeArraySizeException e ) {
+      System.out.println("Saw expected NegativeArraySizeException #2");
+    }
 
-    passed = false;
-    t.start();
-    Thread.sleep(1000);
-    t.stop();
+    /* Test repetitions. If the test succeeds-mostly, it succeeds,
+     * as long as it does not crash (the outcome if the exception range
+     * table entry for the array allocation is missing).
+     */
+    int N = 12;
+    for (int n = 0; n < N; n++) {
+      threadTest();
+    }
 
-    Thread.sleep(5000);
-    t.join();
-    if (passed) {
+    if (passed > N/2) {
+      System.out.println("Saw " + passed + " out of " + N + " possible ThreadDeath hits");
       System.out.println("PASSED");
     } else {
-      System.out.println("FAILED");
-      System.exit(97);
+      System.out.println("Too few ThreadDeath hits; expected at least " + N/2 +
+                         " but saw only " + passed);
+      fail();
    }
  }
+  static void fail() {
+    System.out.println("FAILED");
+    System.exit(97);
+  }
 };