/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
#define _TOOLS_LINUX_ASM_X86_BARRIER_H

/*
 * Copied from the Linux kernel sources, and also moved here from
 * tools/perf/perf-sys.h so that it lives in a location mirroring
 * the kernel source tree.
 *
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * The __asm__/__volatile__ alternate keywords are used (rather than
 * asm/volatile) so the macros also compile under strict ISO C modes
 * such as -std=c11, where the plain spellings are not keywords.
 */

#if defined(__i386__)
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these, so all three barriers use a serializing locked add to
 * the top of the stack (cheaper than cpuid, available on all i386+).
 */
#define mb()	__asm__ __volatile__("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb()	__asm__ __volatile__("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb()	__asm__ __volatile__("lock; addl $0,0(%%esp)" ::: "memory")
#elif defined(__x86_64__)
/* Full barrier: orders both loads and stores. */
#define mb()	__asm__ __volatile__("mfence" ::: "memory")
/* Read barrier: orders loads with respect to loads. */
#define rmb()	__asm__ __volatile__("lfence" ::: "memory")
/* Write barrier: orders stores with respect to stores. */
#define wmb()	__asm__ __volatile__("sfence" ::: "memory")
#endif

#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */