#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
#define _TOOLS_LINUX_ASM_X86_BARRIER_H

/*
 * Copied from the Linux kernel sources, moving the code out of
 * tools/perf/perf-sys.h so that it lives in a location that mirrors
 * the kernel source tree.
 *
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#if defined(__i386__)
/*
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */
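/*
 * Editorial note (an assumption, not part of the original header): the
 * "lock; addl $0,0(%%esp)" form adds zero to the word at the top of the
 * stack, so it changes no architectural state, but the LOCK prefix makes it
 * a fully ordered operation. It is used instead of MFENCE/LFENCE/SFENCE
 * because those are SSE/SSE2 instructions that not all 32-bit processors
 * implement.
 */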
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#elif defined(__x86_64__)
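/*
 * Editorial note: x86-64 guarantees SSE2, so each barrier can map to the
 * narrowest fence that suffices: MFENCE orders both loads and stores,
 * LFENCE orders loads, SFENCE orders stores.
 */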
#define mb() asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
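
/*
 * Usage sketch (editorial, hypothetical): a minimal producer/consumer pair
 * showing the intended wmb()/rmb() pairing. The names shared_data,
 * shared_ready, produce() and consume() are illustrative assumptions, not
 * part of this header or of any kernel API. Guarded out so it never
 * compiles into users of this header.
 */
#if 0
static int shared_data;
static int shared_ready;

static void produce(int v)
{
	shared_data = v;	/* publish the payload */
	wmb();			/* order the payload store before the flag store */
	shared_ready = 1;	/* signal the consumer */
}

static int consume(void)
{
	/*
	 * A real implementation would read the flag with READ_ONCE() (or a
	 * volatile access) so the compiler cannot cache it across iterations.
	 */
	while (!shared_ready)
		;
	rmb();			/* order the flag load before the payload load */
	return shared_data;	/* the payload store is now guaranteed visible */
}
#endif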

#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */