# HG changeset patch
# User coleenp
# Date 1259769589 28800
# Node ID 0018cf203583bdfdaa0d2d4873e204b63428ffd1
# Parent 4b966d9946a3a171a135f4e90a632292075af54f
# Parent 6400f475effea5ad14a70b450c9503311f75545e
Merge

diff -r 4b966d9946a3 -r 0018cf203583 src/cpu/zero/vm/cppInterpreter_zero.cpp
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Dec 02 07:59:49 2009 -0800
@@ -204,6 +204,20 @@
     goto unwind_and_return;
   }
 
+  // Update the invocation counter
+  if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
+    thread->set_do_not_unlock();
+    InvocationCounter *counter = method->invocation_counter();
+    counter->increment();
+    if (counter->reached_InvocationLimit()) {
+      CALL_VM_NOCHECK(
+        InterpreterRuntime::frequency_counter_overflow(thread, NULL));
+      if (HAS_PENDING_EXCEPTION)
+        goto unwind_and_return;
+    }
+    thread->clr_do_not_unlock();
+  }
+
   // Lock if necessary
   BasicObjectLock *monitor;
   monitor = NULL;
diff -r 4b966d9946a3 -r 0018cf203583 src/cpu/zero/vm/frame_zero.cpp
--- a/src/cpu/zero/vm/frame_zero.cpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/cpu/zero/vm/frame_zero.cpp Wed Dec 02 07:59:49 2009 -0800
@@ -36,11 +36,8 @@
   return zeroframe()->is_interpreter_frame();
 }
 
-bool frame::is_fake_stub_frame() const {
-  return zeroframe()->is_fake_stub_frame();
-}
-
 frame frame::sender_for_entry_frame(RegisterMap *map) const {
+  assert(zeroframe()->is_entry_frame(), "wrong type of frame");
   assert(map != NULL, "map must be set");
   assert(!entry_frame_is_first(), "next Java fp must be non zero");
   assert(entry_frame_call_wrapper()->anchor()->last_Java_sp() == sender_sp(),
@@ -50,15 +47,10 @@
   return frame(sender_sp(), sp() + 1);
 }
 
-frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
-  return frame(sender_sp(), sp() + 1);
-}
-
-frame frame::sender_for_compiled_frame(RegisterMap *map) const {
-  return frame(sender_sp(), sp() + 1);
-}
-
-frame frame::sender_for_fake_stub_frame(RegisterMap *map) const {
+frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
+  assert(zeroframe()->is_interpreter_frame() ||
+         zeroframe()->is_shark_frame() ||
+         zeroframe()->is_fake_stub_frame(), "wrong type of frame");
   return frame(sender_sp(), sp() + 1);
 }
 
@@ -69,17 +61,8 @@
 
   if (is_entry_frame())
     return sender_for_entry_frame(map);
-
-  if (is_interpreted_frame())
-    return sender_for_interpreter_frame(map);
-
-  if (is_compiled_frame())
-    return sender_for_compiled_frame(map);
-
-  if (is_fake_stub_frame())
-    return sender_for_fake_stub_frame(map);
-
-  ShouldNotReachHere();
+  else
+    return sender_for_nonentry_frame(map);
 }
 
 #ifdef CC_INTERP
diff -r 4b966d9946a3 -r 0018cf203583 src/cpu/zero/vm/frame_zero.hpp
--- a/src/cpu/zero/vm/frame_zero.hpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/cpu/zero/vm/frame_zero.hpp Wed Dec 02 07:59:49 2009 -0800
@@ -65,10 +65,7 @@
   }
 
  public:
-  bool is_fake_stub_frame() const;
-
- public:
-  frame sender_for_fake_stub_frame(RegisterMap* map) const;
+  frame sender_for_nonentry_frame(RegisterMap* map) const;
 
  public:
   void zero_print_on_error(int index,
diff -r 4b966d9946a3 -r 0018cf203583 src/cpu/zero/vm/globals_zero.hpp
--- a/src/cpu/zero/vm/globals_zero.hpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/cpu/zero/vm/globals_zero.hpp Wed Dec 02 07:59:49 2009 -0800
@@ -36,7 +36,6 @@
 
 define_pd_global(intx, CodeEntryAlignment,   32);
 define_pd_global(intx, InlineFrequencyCount, 100);
-define_pd_global(intx, InlineSmallCode,      1000);
 define_pd_global(intx, PreInflateSpin,       10);
 
 define_pd_global(intx, StackYellowPages,     2);
diff -r 4b966d9946a3 -r 0018cf203583 src/cpu/zero/vm/sharedRuntime_zero.cpp
--- a/src/cpu/zero/vm/sharedRuntime_zero.cpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp Wed Dec 02 07:59:49 2009 -0800
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,14 @@
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type) {
+#ifdef SHARK
+  return SharkCompiler::compiler()->generate_native_wrapper(masm,
+                                                            method,
+                                                            in_sig_bt,
+                                                            ret_type);
+#else
   ShouldNotCallThis();
+#endif // SHARK
 }
 
 int Deoptimization::last_frame_adjust(int callee_parameters,
diff -r 4b966d9946a3 -r 0018cf203583 src/cpu/zero/vm/sharkFrame_zero.hpp
--- a/src/cpu/zero/vm/sharkFrame_zero.hpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/cpu/zero/vm/sharkFrame_zero.hpp Wed Dec 02 07:59:49 2009 -0800
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
 // | ... |
 
 class SharkFrame : public ZeroFrame {
-  friend class SharkFunction;
+  friend class SharkStack;
 
  private:
   SharkFrame() : ZeroFrame() {
diff -r 4b966d9946a3 -r 0018cf203583 src/share/vm/interpreter/bytecodeInterpreter.cpp
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Dec 02 07:59:49 2009 -0800
@@ -281,7 +281,7 @@
 
 #define DO_BACKEDGE_CHECKS(skip, branch_pc) \
     if ((skip) <= 0) { \
-      if (UseCompiler && UseLoopCounter) { \
+      if (UseLoopCounter) { \
        bool do_OSR = UseOnStackReplacement; \
        BACKEDGE_COUNT->increment(); \
        if (do_OSR) do_OSR = BACKEDGE_COUNT->reached_InvocationLimit(); \
@@ -289,16 +289,12 @@
           nmethod* osr_nmethod; \
           OSR_REQUEST(osr_nmethod, branch_pc); \
           if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
-            intptr_t* buf; \
-            CALL_VM(buf=SharedRuntime::OSR_migration_begin(THREAD), handle_exception); \
+            intptr_t* buf = SharedRuntime::OSR_migration_begin(THREAD); \
             istate->set_msg(do_osr); \
             istate->set_osr_buf((address)buf); \
             istate->set_osr_entry(osr_nmethod->osr_entry()); \
             return; \
           } \
-      } else { \
-        INCR_INVOCATION_COUNT; \
-        SAFEPOINT; \
       } \
     } /* UseCompiler ... */ \
     INCR_INVOCATION_COUNT; \
@@ -1281,12 +1277,7 @@
       jfloat f;
       jdouble r;
       f = STACK_FLOAT(-1);
-#ifdef IA64
-      // IA64 gcc bug
-      r = ( f == 0.0f ) ? (jdouble) f : (jdouble) f + ia64_double_zero;
-#else
       r = (jdouble) f;
-#endif
       MORE_STACK(-1); // POP
       SET_STACK_DOUBLE(r, 1);
       UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
diff -r 4b966d9946a3 -r 0018cf203583 src/share/vm/interpreter/templateInterpreter.cpp
--- a/src/share/vm/interpreter/templateInterpreter.cpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/share/vm/interpreter/templateInterpreter.cpp Wed Dec 02 07:59:49 2009 -0800
@@ -465,9 +465,11 @@
 void TemplateInterpreterGenerator::set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
   assert(t->is_valid(), "template must exist");
   switch (t->tos_in()) {
-    case btos: vep = __ pc(); __ pop(btos); bep = __ pc(); generate_and_dispatch(t); break;
-    case ctos: vep = __ pc(); __ pop(ctos); sep = __ pc(); generate_and_dispatch(t); break;
-    case stos: vep = __ pc(); __ pop(stos); sep = __ pc(); generate_and_dispatch(t); break;
+    case btos:
+    case ctos:
+    case stos:
+      ShouldNotReachHere();  // btos/ctos/stos should use itos.
+      break;
     case atos: vep = __ pc(); __ pop(atos); aep = __ pc(); generate_and_dispatch(t); break;
     case itos: vep = __ pc(); __ pop(itos); iep = __ pc(); generate_and_dispatch(t); break;
     case ltos: vep = __ pc(); __ pop(ltos); lep = __ pc(); generate_and_dispatch(t); break;
diff -r 4b966d9946a3 -r 0018cf203583 src/share/vm/opto/escape.cpp
--- a/src/share/vm/opto/escape.cpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/share/vm/opto/escape.cpp Wed Dec 02 07:59:49 2009 -0800
@@ -1150,7 +1150,6 @@
     } else {
       assert(n->is_Mem(), "memory node required.");
       Node *addr = n->in(MemNode::Address);
-      assert(addr->is_AddP(), "AddP required");
       const Type *addr_t = igvn->type(addr);
       if (addr_t == Type::TOP)
         continue;
diff -r 4b966d9946a3 -r 0018cf203583 src/share/vm/opto/parse1.cpp
--- a/src/share/vm/opto/parse1.cpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/share/vm/opto/parse1.cpp Wed Dec 02 07:59:49 2009 -0800
@@ -231,12 +231,13 @@
 
   // Use the raw liveness computation to make sure that unexpected
   // values don't propagate into the OSR frame.
-  MethodLivenessResult live_locals = method()->raw_liveness_at_bci(osr_bci());
+  MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
   if (!live_locals.is_valid()) {
     // Degenerate or breakpointed method.
     C->record_method_not_compilable("OSR in empty or breakpointed method");
     return;
   }
+  MethodLivenessResult raw_live_locals = method()->raw_liveness_at_bci(osr_bci());
 
   // Extract the needed locals from the interpreter frame.
   Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);
@@ -316,6 +317,10 @@
         continue;
       }
     }
+    if (type->basic_type() == T_ADDRESS && !raw_live_locals.at(index)) {
+      // Skip type check for dead address locals
+      continue;
+    }
     set_local(index, check_interpreter_type(l, type, bad_type_exit));
   }
 
diff -r 4b966d9946a3 -r 0018cf203583 src/share/vm/prims/jni.cpp
--- a/src/share/vm/prims/jni.cpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/share/vm/prims/jni.cpp Wed Dec 02 07:59:49 2009 -0800
@@ -3231,6 +3231,21 @@
   jint result = JNI_ERR;
   DT_RETURN_MARK(CreateJavaVM, jint, (const jint&)result);
 
+  // We're about to use Atomic::xchg for synchronization. Some Zero
+  // platforms use the GCC builtin __sync_lock_test_and_set for this,
+  // but __sync_lock_test_and_set is not guaranteed to do what we want
+  // on all architectures. So we check it works before relying on it.
+#if defined(ZERO) && defined(ASSERT)
+  {
+    jint a = 0xcafebabe;
+    jint b = Atomic::xchg(0xdeadbeef, &a);
+    void *c = &a;
+    void *d = Atomic::xchg_ptr(&b, &c);
+    assert(a == 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
+    assert(c == &b && d == &a, "Atomic::xchg_ptr() works");
+  }
+#endif // ZERO && ASSERT
+
   // At the moment it's only possible to have one Java VM,
   // since some of the runtime state is in global variables.
 
diff -r 4b966d9946a3 -r 0018cf203583 src/share/vm/prims/jvmtiManageCapabilities.cpp
--- a/src/share/vm/prims/jvmtiManageCapabilities.cpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/share/vm/prims/jvmtiManageCapabilities.cpp Wed Dec 02 07:59:49 2009 -0800
@@ -115,8 +115,10 @@
   jvmtiCapabilities jc;
 
   memset(&jc, 0, sizeof(jc));
+#ifndef CC_INTERP
   jc.can_pop_frame = 1;
   jc.can_force_early_return = 1;
+#endif // !CC_INTERP
   jc.can_get_source_debug_extension = 1;
   jc.can_access_local_variables = 1;
   jc.can_maintain_original_method_order = 1;
diff -r 4b966d9946a3 -r 0018cf203583 src/share/vm/runtime/os.hpp
--- a/src/share/vm/runtime/os.hpp Wed Nov 25 08:37:04 2009 -0800
+++ b/src/share/vm/runtime/os.hpp Wed Dec 02 07:59:49 2009 -0800
@@ -294,19 +294,16 @@
   }
 
   static bool is_memory_serialize_page(JavaThread *thread, address addr) {
-    address thr_addr;
     if (UseMembar) return false;
-    // Calculate thread specific address
+    // Previously this function calculated the exact address of this
+    // thread's serialize page, and checked if the faulting address
+    // was equal. However, some platforms mask off faulting addresses
+    // to the page size, so now we just check that the address is
+    // within the page. This makes the thread argument unnecessary,
+    // but we retain the NULL check to preserve existing behaviour.
     if (thread == NULL) return false;
-    // TODO-FIXME: some platforms mask off faulting addresses to the base pagesize.
-    // Instead of using a test for equality we should probably use something
-    // of the form:
-    // return ((_mem_serialize_page ^ addr) & -pagesize) == 0
-    //
-    thr_addr = (address)(((uintptr_t)thread >>
-                get_serialize_page_shift_count()) &
-                get_serialize_page_mask()) + (uintptr_t)_mem_serialize_page;
-    return (thr_addr == addr);
+    address page = (address) _mem_serialize_page;
+    return addr >= page && addr < (page + os::vm_page_size());
   }
 
   static void block_on_serialize_page_trap();
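
The os.hpp hunk above replaces an exact-address comparison with a simple containment test: the faulting address only has to fall somewhere inside the serialize page, not match a thread-specific offset. The standalone sketch below (illustrative names and a hypothetical 4K page, not part of the changeset) shows the same range check in isolation:

    #include <cassert>
    #include <cstddef>

    // Illustrative only: true when addr lies in [page, page + page_size),
    // mirroring the containment test the patch adds to is_memory_serialize_page().
    static bool in_serialize_page(const char *addr, const char *page, size_t page_size) {
      return addr >= page && addr < page + page_size;
    }

    int main() {
      static char page[4096];  // stand-in for the VM's serialize page
      assert(in_serialize_page(page, page, sizeof(page)));          // first byte: inside
      assert(in_serialize_page(page + 100, page, sizeof(page)));    // interior byte: inside
      assert(!in_serialize_page(page + 4096, page, sizeof(page)));  // one past the end: outside
      return 0;
    }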