comparison src/cpu/x86/vm/x86_32.ad @ 17714:4d4ea046d32a

8033805: Move Fast_Lock/Fast_Unlock code from .ad files to macroassembler
Summary: Consolidated C2 x86 locking code in one place in macroAssembler_x86.cpp.
Reviewed-by: roland
author kvn
date Mon, 24 Feb 2014 15:12:26 -0800
parents 984401824c5e
children 085b304a1cc5
comparing 17713:e7cf34c87433 (base) with 17714:4d4ea046d32a
2908 emit_opcode(cbuf,0x83); // SBB hi,0
2909 emit_rm (cbuf,0x3, 0x3, HIGH_FROM_LOW($dst$$reg));
2910 emit_d8 (cbuf,0 );
2911 %}
2912
2913
2914 // Because the transitions from emitted code to the runtime
2915 // monitorenter/exit helper stubs are so slow it's critical that
2916 // we inline both the stack-locking fast-path and the inflated fast path.
2917 //
2918 // See also: cmpFastLock and cmpFastUnlock.
2919 //
2920 // What follows is a specialized inline transliteration of the code
2921 // in slow_enter() and slow_exit(). If we're concerned about I$ bloat
2922 // another option would be to emit TrySlowEnter and TrySlowExit methods
2923 // at startup-time. These methods would accept arguments as
2924 // (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
2925 // indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply
2926 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
2927 // In practice, however, the # of lock sites is bounded and is usually small.
2928 // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
2929 // if the processor uses simple bimodal branch predictors keyed by EIP,
2930 // since the helper routines would be called from multiple synchronization
2931 // sites.
2932 //
2933 // An even better approach would be to write "MonitorEnter()" and "MonitorExit()"
2934 // in java - using j.u.c and unsafe - and just bind the lock and unlock sites
2935 // to those specialized methods. That'd give us a mostly platform-independent
2936 // implementation that the JITs could optimize and inline at their pleasure.
2937 // Done correctly, the only time we'd need to cross to native code would be
2938 // to park() or unpark() threads. We'd also need a few more unsafe operators
2939 // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
2940 // (b) issue explicit barriers or fence operations.
2941 //
2942 // TODO:
2943 //
2944 // * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
2945 // This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
2946 // Given TLAB allocation, Self is usually manifested in a register, so passing it into
2947 // the lock operators would typically be faster than reifying Self.
2948 //
2949 // * Ideally I'd define the primitives as:
2950 // fast_lock (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
2951 // fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
2952 // Unfortunately ADLC bugs prevent us from expressing the ideal form.
2953 // Instead, we're stuck with the rather awkward and brittle register assignments below.
2954 // Furthermore the register assignments are overconstrained, possibly resulting in
2955 // sub-optimal code near the synchronization site.
2956 //
2957 // * Eliminate the sp-proximity tests and just use "== Self" tests instead.
2958 // Alternately, use a better sp-proximity test.
2959 //
2960 // * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
2961 // Either one is sufficient to uniquely identify a thread.
2962 // TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
2963 //
2964 // * Intrinsify notify() and notifyAll() for the common cases where the
2965 // object is locked by the calling thread but the waitlist is empty.
2966 // This avoids the expensive JNI call to JVM_Notify() and JVM_NotifyAll().
2967 //
2968 // * use jccb and jmpb instead of jcc and jmp to improve code density.
2969 // But beware of excessive branch density on AMD Opterons.
2970 //
2971 // * Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
2972 // or failure of the fast-path. If the fast-path fails then we pass
2973 // control to the slow-path, typically in C. In Fast_Lock and
2974 // Fast_Unlock we often branch to DONE_LABEL, just to find that C2
2975 // will emit a conditional branch immediately after the node.
2976 // So we have branches to branches and lots of ICC.ZF games.
2977 // Instead, it might be better to have C2 pass a "FailureLabel"
2978 // into Fast_Lock and Fast_Unlock. In the case of success, control
2979 // will drop through the node. ICC.ZF is undefined at exit.
2980 // In the case of failure, the node will branch directly to the
2981 // FailureLabel.
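
For orientation, a minimal sketch of how a compiled call site consumes the ZFlag protocol described above; the label names and surrounding control flow are hypothetical, not what C2 actually emits:

// Hypothetical consumer of the Fast_Lock/Fast_Unlock ZFlag protocol (sketch only).
Label slow_path, done;
// ... Fast_Lock (defined below) is emitted at this point; it leaves ICC.ZF == 1 on success ...
masm.jcc(Assembler::notZero, slow_path);  // ZF == 0: fast path failed
// ZF == 1: lock held, continue with the synchronized region
masm.jmp(done);
masm.bind(slow_path);                     // runtime monitorenter/monitorexit helper call goes here
masm.bind(done);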
2982
2983
2984 // obj: object to lock
2985 // box: on-stack box address (displaced header location) - KILLED
2986 // rax,: tmp -- KILLED
2987 // scr: tmp -- KILLED
2988 enc_class Fast_Lock( eRegP obj, eRegP box, eAXRegI tmp, eRegP scr ) %{
2989
2990 Register objReg = as_Register($obj$$reg);
2991 Register boxReg = as_Register($box$$reg);
2992 Register tmpReg = as_Register($tmp$$reg);
2993 Register scrReg = as_Register($scr$$reg);
2994
2995 // Ensure the register assignments are disjoint
2996 guarantee (objReg != boxReg, "") ;
2997 guarantee (objReg != tmpReg, "") ;
2998 guarantee (objReg != scrReg, "") ;
2999 guarantee (boxReg != tmpReg, "") ;
3000 guarantee (boxReg != scrReg, "") ;
3001 guarantee (tmpReg == as_Register(EAX_enc), "") ;
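// (tmpReg is pinned to EAX because the CMPXCHG instructions below use EAX as their
// implicit comparand; see the cmpxchgptr calls and the invariant notes further down.)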
3002
3003 MacroAssembler masm(&cbuf);
3004
3005 if (_counters != NULL) {
3006 masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
3007 }
3008 if (EmitSync & 1) {
3009 // set box->dhw = unused_mark (3)
3010 // Force all sync thru slow-path: slow_enter() and slow_exit()
3011 masm.movptr (Address(boxReg, 0), int32_t(markOopDesc::unused_mark())) ;
3012 masm.cmpptr (rsp, (int32_t)0) ;
3013 } else
3014 if (EmitSync & 2) {
3015 Label DONE_LABEL ;
3016 if (UseBiasedLocking) {
3017 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
3018 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
3019 }
3020
3021 masm.movptr(tmpReg, Address(objReg, 0)) ; // fetch markword
3022 masm.orptr (tmpReg, 0x1);
3023 masm.movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
3024 if (os::is_MP()) { masm.lock(); }
3025 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
3026 masm.jcc(Assembler::equal, DONE_LABEL);
3027 // Recursive locking
3028 masm.subptr(tmpReg, rsp);
3029 masm.andptr(tmpReg, (int32_t) 0xFFFFF003 );
3030 masm.movptr(Address(boxReg, 0), tmpReg);
3031 masm.bind(DONE_LABEL) ;
3032 } else {
3033 // Possible cases that we'll encounter in fast_lock
3034 // ------------------------------------------------
3035 // * Inflated
3036 // -- unlocked
3037 // -- Locked
3038 // = by self
3039 // = by other
3040 // * biased
3041 // -- by Self
3042 // -- by other
3043 // * neutral
3044 // * stack-locked
3045 // -- by self
3046 // = sp-proximity test hits
3047 // = sp-proximity test generates false-negative
3048 // -- by other
3049 //
3050
3051 Label IsInflated, DONE_LABEL, PopDone ;
3052
3053 // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
3054 // order to reduce the number of conditional branches in the most common cases.
3055 // Beware -- there's a subtle invariant that fetch of the markword
3056 // at [FETCH], below, will never observe a biased encoding (*101b).
3057 // If this invariant is not held we risk exclusion (safety) failure.
3058 if (UseBiasedLocking && !UseOptoBiasInlining) {
3059 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
3060 }
3061
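// For reference, the markword low-order bits consulted by the triage below
// (constants from markOop.hpp, reproduced here as a reading aid):
//   00b  - stack-locked: markword holds a pointer to a BasicLock in the owner's frame
//   01b  - neutral (unlocked)
//   10b  - inflated: markword holds ObjectMonitor* | monitor_value (2)
//  101b  - biased (biased_lock_pattern); per the invariant above, never observed at [FETCH]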
3062 masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
3063 masm.testptr(tmpReg, 0x02) ; // Inflated v (Stack-locked or neutral)
3064 masm.jccb (Assembler::notZero, IsInflated) ;
3065
3066 // Attempt stack-locking ...
3067 masm.orptr (tmpReg, 0x1);
3068 masm.movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
3069 if (os::is_MP()) { masm.lock(); }
3070 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
3071 if (_counters != NULL) {
3072 masm.cond_inc32(Assembler::equal,
3073 ExternalAddress((address)_counters->fast_path_entry_count_addr()));
3074 }
3075 masm.jccb (Assembler::equal, DONE_LABEL);
3076
3077 // Recursive locking
3078 masm.subptr(tmpReg, rsp);
3079 masm.andptr(tmpReg, 0xFFFFF003 );
3080 masm.movptr(Address(boxReg, 0), tmpReg);
3081 if (_counters != NULL) {
3082 masm.cond_inc32(Assembler::equal,
3083 ExternalAddress((address)_counters->fast_path_entry_count_addr()));
3084 }
3085 masm.jmp (DONE_LABEL) ;
3086
3087 masm.bind (IsInflated) ;
3088
3089 // The object is inflated.
3090 //
3091 // TODO-FIXME: eliminate the ugly use of manifest constants:
3092 // Use markOopDesc::monitor_value instead of "2".
3093 // Use markOopDesc::unused_mark() instead of "3".
3094 // The tmpReg value is an objectMonitor reference ORed with
3095 // markOopDesc::monitor_value (2). We can either convert tmpReg to an
3096 // objectmonitor pointer by masking off the "2" bit or we can just
3097 // use tmpReg as an objectmonitor pointer but bias the objectmonitor
3098 // field offsets with "-2" to compensate for and annul the low-order tag bit.
3099 //
3100 // I use the latter as it avoids AGI stalls.
3101 // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
3102 // instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
3103 //
3104 #define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)
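// For example, with tmpReg holding ObjectMonitor* | 2, these address the same field
// (the code below spells the -2 out by hand instead of using the macro):
//   Address(tmpReg, OFFSET_SKEWED(owner))
//   Address(tmpReg, ObjectMonitor::owner_offset_in_bytes() - 2)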
3105
3106 // boxReg refers to the on-stack BasicLock in the current frame.
3107 // We'd like to write:
3108 // set box->_displaced_header = markOopDesc::unused_mark(). Any non-0 value suffices.
3109 // This is convenient but results in a ST-before-CAS penalty. The following CAS suffers
3110 // additional latency as we have another ST in the store buffer that must drain.
3111
3112 if (EmitSync & 8192) {
3113 masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
3114 masm.get_thread (scrReg) ;
3115 masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
3116 masm.movptr(tmpReg, NULL_WORD); // consider: xor vs mov
3117 if (os::is_MP()) { masm.lock(); }
3118 masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3119 } else
3120 if ((EmitSync & 128) == 0) { // avoid ST-before-CAS
3121 masm.movptr(scrReg, boxReg) ;
3122 masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
3123
3124 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
3125 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
3126 // prefetchw [eax + Offset(_owner)-2]
3127 masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
3128 }
3129
3130 if ((EmitSync & 64) == 0) {
3131 // Optimistic form: consider XORL tmpReg,tmpReg
3132 masm.movptr(tmpReg, NULL_WORD) ;
3133 } else {
3134 // Can suffer RTS->RTO upgrades on shared or cold $ lines
3135 // Test-And-CAS instead of CAS
3136 masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner
3137 masm.testptr(tmpReg, tmpReg) ; // Locked ?
3138 masm.jccb (Assembler::notZero, DONE_LABEL) ;
3139 }
3140
3141 // Appears unlocked - try to swing _owner from null to non-null.
3142 // Ideally, I'd manifest "Self" with get_thread and then attempt
3143 // to CAS the register containing Self into m->Owner.
3144 // But we don't have enough registers, so instead we can either try to CAS
3145 // rsp or the address of the box (in scr) into &m->owner. If the CAS succeeds
3146 // we later store "Self" into m->Owner. Transiently storing a stack address
3147 // (rsp or the address of the box) into m->owner is harmless.
3148 // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
3149 if (os::is_MP()) { masm.lock(); }
3150 masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3151 masm.movptr(Address(scrReg, 0), 3) ; // box->_displaced_header = 3
3152 masm.jccb (Assembler::notZero, DONE_LABEL) ;
3153 masm.get_thread (scrReg) ; // beware: clobbers ICCs
3154 masm.movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg) ;
3155 masm.xorptr(boxReg, boxReg) ; // set icc.ZFlag = 1 to indicate success
3156
3157 // If the CAS fails we can either retry or pass control to the slow-path.
3158 // We use the latter tactic.
3159 // Pass the CAS result in the icc.ZFlag into DONE_LABEL
3160 // If the CAS was successful ...
3161 // Self has acquired the lock
3162 // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
3163 // Intentional fall-through into DONE_LABEL ...
3164 } else {
3165 masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
3166 masm.movptr(boxReg, tmpReg) ;
3167
3168 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
3169 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
3170 // prefetchw [eax + Offset(_owner)-2]
3171 masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
3172 }
3173
3174 if ((EmitSync & 64) == 0) {
3175 // Optimistic form
3176 masm.xorptr (tmpReg, tmpReg) ;
3177 } else {
3178 // Can suffer RTS->RTO upgrades on shared or cold $ lines
3179 masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner
3180 masm.testptr(tmpReg, tmpReg) ; // Locked ?
3181 masm.jccb (Assembler::notZero, DONE_LABEL) ;
3182 }
3183
3184 // Appears unlocked - try to swing _owner from null to non-null.
3185 // Use either "Self" (in scr) or rsp as thread identity in _owner.
3186 // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
3187 masm.get_thread (scrReg) ;
3188 if (os::is_MP()) { masm.lock(); }
3189 masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3190
3191 // If the CAS fails we can either retry or pass control to the slow-path.
3192 // We use the latter tactic.
3193 // Pass the CAS result in the icc.ZFlag into DONE_LABEL
3194 // If the CAS was successful ...
3195 // Self has acquired the lock
3196 // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
3197 // Intentional fall-through into DONE_LABEL ...
3198 }
3199
3200 // DONE_LABEL is a hot target - we'd really like to place it at the
3201 // start of a cache line by padding with NOPs.
3202 // See the AMD and Intel software optimization manuals for the
3203 // most efficient "long" NOP encodings.
3204 // Unfortunately none of our alignment mechanisms suffice.
3205 masm.bind(DONE_LABEL);
3206
3207 // Avoid branch-to-branch on AMD processors
3208 // This appears to be superstition.
3209 if (EmitSync & 32) masm.nop() ;
3210
3211
3212 // At DONE_LABEL the icc ZFlag is set as follows ...
3213 // Fast_Unlock uses the same protocol.
3214 // ZFlag == 1 -> Success
3215 // ZFlag == 0 -> Failure - force control through the slow-path
3216 }
3217 %}
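
As a cross-check on the stack-locking fast path above, here is a minimal, standalone C++ model of it; the types, names, and constants are invented for the sketch, and only the control flow mirrors the Fast_Lock encoding (the inflated ObjectMonitor path and the biased-locking prologue are deliberately omitted):

#include <atomic>
#include <cstdint>

struct BasicLockModel { uintptr_t displaced_header; };

// Returns true if the lock was acquired on the fast path (plain or recursive
// stack-lock), false if control must pass to the slow path / monitor path.
// (Models the 32-bit encoding above; not safe as-is on a 64-bit address space.)
bool fast_lock_model(std::atomic<uintptr_t>& mark,  // the object's mark word
                     BasicLockModel* box,           // on-stack box in the current frame
                     uintptr_t sp) {                // current stack pointer
  uintptr_t m = mark.load(std::memory_order_relaxed);
  if (m & 0x2) {
    return false;                      // inflated: handled by the ObjectMonitor path
  }
  uintptr_t unlocked = m | 0x1;        // neutral (unlocked) encoding of the mark
  box->displaced_header = unlocked;    // "Anticipate successful CAS"
  if (mark.compare_exchange_strong(unlocked,
                                   reinterpret_cast<uintptr_t>(box))) {
    return true;                       // stack-locked by this frame
  }
  // CAS failed; 'unlocked' now holds the current mark. If that value is an address
  // close to our own sp this is a recursive stack-lock: record a zero displaced
  // header, exactly as the SUB/AND/MOV sequence above does.
  uintptr_t delta = unlocked - sp;
  const uintptr_t kMask = 0xFFFFF003u; // 32-bit sp-proximity mask used above
  box->displaced_header = delta & kMask;
  return (delta & kMask) == 0;         // zero: recursive success; otherwise slow path
}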
3218
3219 // obj: object to unlock
3220 // box: box address (displaced header location), killed. Must be EAX.
3221 // rbx,: killed tmp; cannot be obj nor box.
3222 //
3223 // Some commentary on balanced locking:
3224 //
3225 // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
3226 // Methods that don't have provably balanced locking are forced to run in the
3227 // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
3228 // The interpreter provides two properties:
3229 // I1: At return-time the interpreter automatically and quietly unlocks any
3230 // objects acquired by the current activation (frame). Recall that the
3231 // interpreter maintains an on-stack list of locks currently held by
3232 // a frame.
3233 // I2: If a method attempts to unlock an object that is not held by the
3234 // frame, the interpreter throws IMSX (IllegalMonitorStateException).
3235 //
3236 // Let's say A(), which has provably balanced locking, acquires O and then calls B().
3237 // B() doesn't have provably balanced locking so it runs in the interpreter.
3238 // Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O
3239 // is still locked by A().
3240 //
3241 // The only other source of unbalanced locking would be JNI. The "Java Native Interface:
3242 // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
3243 // should not be unlocked by "normal" java-level locking and vice-versa. The specification
3244 // doesn't specify what will occur if a program engages in such mixed-mode locking, however.
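
For what it's worth, a minimal sketch of the JNI route to such mixed-mode locking; MonitorEnter/MonitorExit are the standard JNI functions from jni.h, everything else here is hypothetical:

#include <jni.h>

// Sketch: acquire a monitor via JNI rather than via a Java synchronized block.
void mixed_mode_locking(JNIEnv* env, jobject obj) {
  env->MonitorEnter(obj);   // locked through JNI; invisible to Java-level balancing
  // If Java code were to run a monitorexit on 'obj' here, the JNI specification
  // leaves the behavior unspecified -- which is why the fast paths above assume
  // provably balanced, Java-level locking only.
  env->MonitorExit(obj);
}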
3245
3246 enc_class Fast_Unlock( nabxRegP obj, eAXRegP box, eRegP tmp) %{
3247
3248 Register objReg = as_Register($obj$$reg);
3249 Register boxReg = as_Register($box$$reg);
3250 Register tmpReg = as_Register($tmp$$reg);
3251
3252 guarantee (objReg != boxReg, "") ;
3253 guarantee (objReg != tmpReg, "") ;
3254 guarantee (boxReg != tmpReg, "") ;
3255 guarantee (boxReg == as_Register(EAX_enc), "") ;
3256 MacroAssembler masm(&cbuf);
3257
3258 if (EmitSync & 4) {
3259 // Disable - inhibit all inlining. Force control through the slow-path
3260 masm.cmpptr (rsp, 0) ;
3261 } else
3262 if (EmitSync & 8) {
3263 Label DONE_LABEL ;
3264 if (UseBiasedLocking) {
3265 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
3266 }
3267 // classic stack-locking code ...
3268 masm.movptr(tmpReg, Address(boxReg, 0)) ;
3269 masm.testptr(tmpReg, tmpReg) ;
3270 masm.jcc (Assembler::zero, DONE_LABEL) ;
3271 if (os::is_MP()) { masm.lock(); }
3272 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses EAX which is box
3273 masm.bind(DONE_LABEL);
3274 } else {
3275 Label DONE_LABEL, Stacked, CheckSucc, Inflated ;
3276
3277 // Critically, the biased locking test must have precedence over
3278 // and appear before the (box->dhw == 0) recursive stack-lock test.
3279 if (UseBiasedLocking && !UseOptoBiasInlining) {
3280 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
3281 }
3282
3283 masm.cmpptr(Address(boxReg, 0), 0) ; // Examine the displaced header
3284 masm.movptr(tmpReg, Address(objReg, 0)) ; // Examine the object's markword
3285 masm.jccb (Assembler::zero, DONE_LABEL) ; // 0 indicates recursive stack-lock
3286
3287 masm.testptr(tmpReg, 0x02) ; // Inflated?
3288 masm.jccb (Assembler::zero, Stacked) ;
3289
3290 masm.bind (Inflated) ;
3291 // It's inflated.
3292 // Despite our balanced locking property we still check that m->_owner == Self
3293 // as Java routines or native JNI code called by this thread might
3294 // have released the lock.
3295 // Refer to the comments in synchronizer.cpp for how we might encode extra
3296 // state in _succ so we can avoid fetching EntryList|cxq.
3297 //
3298 // I'd like to add more cases in fast_lock() and fast_unlock() --
3299 // such as recursive enter and exit -- but we have to be wary of
3300 // I$ bloat, T$ effects and BP$ effects.
3301 //
3302 // If there's no contention try a 1-0 exit. That is, exit without
3303 // a costly MEMBAR or CAS. See synchronizer.cpp for details on how
3304 // we detect and recover from the race that the 1-0 exit admits.
3305 //
3306 // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
3307 // before it STs null into _owner, releasing the lock. Updates
3308 // to data protected by the critical section must be visible before
3309 // we drop the lock (and thus before any other thread could acquire
3310 // the lock and observe the fields protected by the lock).
3311 // IA32's memory-model is SPO, so STs are ordered with respect to
3312 // each other and there's no need for an explicit barrier (fence).
3313 // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
3314
3315 masm.get_thread (boxReg) ;
3316 if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
3317 // prefetchw [ebx + Offset(_owner)-2]
3318 masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2));
3319 }
3320
3321 // Note that we could employ various encoding schemes to reduce
3322 // the number of loads below (currently 4) to just 2 or 3.
3323 // Refer to the comments in synchronizer.cpp.
3324 // In practice the chain of fetches doesn't seem to impact performance, however.
3325 if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
3326 // Attempt to reduce branch density - AMD's branch predictor.
3327 masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3328 masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
3329 masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
3330 masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
3331 masm.jccb (Assembler::notZero, DONE_LABEL) ;
3332 masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ;
3333 masm.jmpb (DONE_LABEL) ;
3334 } else {
3335 masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3336 masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
3337 masm.jccb (Assembler::notZero, DONE_LABEL) ;
3338 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
3339 masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
3340 masm.jccb (Assembler::notZero, CheckSucc) ;
3341 masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ;
3342 masm.jmpb (DONE_LABEL) ;
3343 }
3344
3345 // The following code fragment (EmitSync & 65536) improves the performance of
3346 // contended applications and contended synchronization microbenchmarks.
3347 // Unfortunately the emission of the code - even though not executed - causes regressions
3348 // in scimark and jetstream, evidently because of $ effects. Replacing the code
3349 // with an equal number of never-executed NOPs results in the same regression.
3350 // We leave it off by default.
3351
3352 if ((EmitSync & 65536) != 0) {
3353 Label LSuccess, LGoSlowPath ;
3354
3355 masm.bind (CheckSucc) ;
3356
3357 // Optional pre-test ... it's safe to elide this
3358 if ((EmitSync & 16) == 0) {
3359 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
3360 masm.jccb (Assembler::zero, LGoSlowPath) ;
3361 }
3362
3363 // We have a classic Dekker-style idiom:
3364 // ST m->_owner = 0 ; MEMBAR; LD m->_succ
3365 // There are a number of ways to implement the barrier:
3366 // (1) lock:andl &m->_owner, 0
3367 // is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
3368 // LOCK: ANDL [ebx+Offset(_Owner)-2], 0
3369 // Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
3370 // (2) If supported, an explicit MFENCE is appealing.
3371 // In older IA32 processors MFENCE is slower than lock:add or xchg
3372 // particularly if the write-buffer is full, as might be the case if
3373 // stores closely precede the fence or fence-equivalent instruction.
3374 // In more modern implementations MFENCE appears faster, however.
3375 // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
3376 // The $lines underlying the top-of-stack should be in M-state.
3377 // The locked add instruction is serializing, of course.
3378 // (4) Use xchg, which is serializing
3379 // mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
3380 // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
3381 // The integer condition codes will tell us if succ was 0.
3382 // Since _succ and _owner should reside in the same $line and
3383 // we just stored into _owner, it's likely that the $line
3384 // remains in M-state for the lock:orl.
3385 //
3386 // We currently use (3), although it's likely that switching to (2)
3387 // is correct for the future.
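// A rough C++11 analogue of option (3) as used below (illustration only, not HotSpot code):
//   owner.store(0, std::memory_order_release);            // ST m->_owner = 0
//   std::atomic_thread_fence(std::memory_order_seq_cst);  // the lock:addl / mfence below
//   if (succ.load(std::memory_order_relaxed) != 0) { /* ratify _succ, as below */ }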
3388
3389 masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ;
3390 if (os::is_MP()) {
3391 if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
3392 masm.mfence();
3393 } else {
3394 masm.lock () ; masm.addptr(Address(rsp, 0), 0) ;
3395 }
3396 }
3397 // Ratify _succ remains non-null
3398 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
3399 masm.jccb (Assembler::notZero, LSuccess) ;
3400
3401 masm.xorptr(boxReg, boxReg) ; // box is really EAX
3402 if (os::is_MP()) { masm.lock(); }
3403 masm.cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
3404 masm.jccb (Assembler::notEqual, LSuccess) ;
3405 // Since we're low on registers we installed rsp as a placeholder in _owner.
3406 // Now install Self over rsp. This is safe as we're transitioning from
3407 // non-null to non-null.
3408 masm.get_thread (boxReg) ;
3409 masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg) ;
3410 // Intentional fall-through into LGoSlowPath ...
3411
3412 masm.bind (LGoSlowPath) ;
3413 masm.orptr(boxReg, 1) ; // set ICC.ZF=0 to indicate failure
3414 masm.jmpb (DONE_LABEL) ;
3415
3416 masm.bind (LSuccess) ;
3417 masm.xorptr(boxReg, boxReg) ; // set ICC.ZF=1 to indicate success
3418 masm.jmpb (DONE_LABEL) ;
3419 }
3420
3421 masm.bind (Stacked) ;
3422 // It's not inflated and it's not recursively stack-locked and it's not biased.
3423 // It must be stack-locked.
3424 // Try to reset the header to the displaced header.
3425 // The "box" value on the stack is stable, so we can reload
3426 // and be assured we observe the same value as above.
3427 masm.movptr(tmpReg, Address(boxReg, 0)) ;
3428 if (os::is_MP()) { masm.lock(); }
3429 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses EAX which is box
3430 // Intentional fall-through into DONE_LABEL
3431
3432
3433 // DONE_LABEL is a hot target - we'd really like to place it at the
3434 // start of a cache line by padding with NOPs.
3435 // See the AMD and Intel software optimization manuals for the
3436 // most efficient "long" NOP encodings.
3437 // Unfortunately none of our alignment mechanisms suffice.
3438 if ((EmitSync & 65536) == 0) {
3439 masm.bind (CheckSucc) ;
3440 }
3441 masm.bind(DONE_LABEL);
3442
3443 // Avoid branch to branch on AMD processors
3444 if (EmitSync & 32768) { masm.nop() ; }
3445 }
3446 %}
3447
3448
3449 enc_class enc_pop_rdx() %{
3450 emit_opcode(cbuf,0x5A);
3451 %}
3452
3453 enc_class enc_rethrow() %{
13145 ins_pipe( pipe_jmp );
13146 %}
13147
13148 // inlined locking and unlocking
13149
deleted (17713:e7cf34c87433):
13150
13151 instruct cmpFastLock( eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
13152 match( Set cr (FastLock object box) );
13153 effect( TEMP tmp, TEMP scr, USE_KILL box );
13154 ins_cost(300);
13155 format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
13156 ins_encode( Fast_Lock(object,box,tmp,scr) );
13157 ins_pipe( pipe_slow );
13158 %}
13159
13160 instruct cmpFastUnlock( eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
13161 match( Set cr (FastUnlock object box) );
13162 effect( TEMP tmp, USE_KILL box );
13163 ins_cost(300);
13164 format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
13165 ins_encode( Fast_Unlock(object,box,tmp) );
13166 ins_pipe( pipe_slow );
13167 %}
inserted (17714:4d4ea046d32a):
12614 instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
12615 match(Set cr (FastLock object box));
12616 effect(TEMP tmp, TEMP scr, USE_KILL box);
12617 ins_cost(300);
12618 format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
12619 ins_encode %{
12620 __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
12621 %}
12622 ins_pipe(pipe_slow);
12623 %}
12624
12625 instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
12626 match(Set cr (FastUnlock object box));
12627 effect(TEMP tmp, USE_KILL box);
12628 ins_cost(300);
12629 format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
12630 ins_encode %{
12631 __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
12632 %}
12633 ins_pipe(pipe_slow);
12634 %}
13168
13169
13170
13171 // ============================================================================
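
For reference, the macroAssembler_x86 entry points implied by the new ins_encode bodies above would look roughly like the following; these prototypes are inferred from the call sites, not copied from macroAssembler_x86.hpp:

// Sketch only -- inferred from the call sites above, not the authoritative header.
// void MacroAssembler::fast_lock(Register obj, Register box, Register tmp,
//                                Register scr, BiasedLockingCounters* counters);
// void MacroAssembler::fast_unlock(Register obj, Register box, Register tmp);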