comparison src/cpu/x86/vm/assembler_x86.hpp @ 671:d0994e5bebce

6822204: volatile fences should prefer lock:addl to actual mfence instructions
Reviewed-by: kvn, phh
author never
date Thu, 26 Mar 2009 14:31:45 -0700
parents c89f86385056
children fbde8ec322d0
--- src/cpu/x86/vm/assembler_x86.hpp    668:90a66aa50514
+++ src/cpu/x86/vm/assembler_x86.hpp    671:d0994e5bebce
@@ -1066,19 +1066,27 @@
     LoadStore  = 1 << 2,
     StoreLoad  = 1 << 1,
     LoadLoad   = 1 << 0
   };
 
-  // Serializes memory.
+  // Serializes memory and blows flags
   void membar(Membar_mask_bits order_constraint) {
-    // We only have to handle StoreLoad and LoadLoad
-    if (order_constraint & StoreLoad) {
-      // MFENCE subsumes LFENCE
-      mfence();
-    } /* [jk] not needed currently: else if (order_constraint & LoadLoad) {
-      lfence();
-    } */
+    if (os::is_MP()) {
+      // We only have to handle StoreLoad
+      if (order_constraint & StoreLoad) {
+        // All usable chips support "locked" instructions which suffice
+        // as barriers, and are much faster than the alternative of
+        // using cpuid instruction. We use here a locked add [esp],0.
+        // This is conveniently otherwise a no-op except for blowing
+        // flags.
+        // Any change to this code may need to revisit other places in
+        // the code where this idiom is used, in particular the
+        // orderAccess code.
+        lock();
+        addl(Address(rsp, 0), 0);// Assert the lock# signal here
+      }
+    }
   }
 
   void mfence();
 
   // Moves
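
The barrier this hunk emits is easy to reproduce outside HotSpot. Below is a
minimal standalone sketch, assuming GCC/Clang extended inline asm on x86-64;
the helper name storeload_barrier is hypothetical, and HotSpot itself
generates the instruction through its Assembler (lock(); addl(...)) rather
than inline asm.

    // Hypothetical standalone version of the lock:addl StoreLoad barrier,
    // assuming GCC/Clang extended inline asm on x86-64 (not HotSpot code).
    inline void storeload_barrier() {
      // A locked read-modify-write of the word at the stack pointer: the
      // lock# semantics drain the store buffer before any later load can
      // execute, which is the StoreLoad ordering a volatile write needs.
      // Adding 0 leaves memory unchanged, so the only other side effect
      // is clobbering EFLAGS (hence the "cc" clobber).
      __asm__ __volatile__("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
    }

The design point matches the bug title: the locked add of zero to the top of
the stack is preferred here over an actual mfence, and per the in-code
comment it is much faster than the older cpuid alternative; mfence() stays
declared for callers that want the real instruction.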