GitHub Repository: torvalds/linux
Path: blob/master/tools/arch/x86/include/asm/barrier.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
#define _TOOLS_LINUX_ASM_X86_BARRIER_H

/*
 * Copied from the Linux kernel sources, with the code moved out of
 * tools/perf/perf-sys.h so that it lives in a place similar to its
 * location in the kernel sources.
 *
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

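/*
 * Note: mb()/rmb()/wmb() are the mandatory barriers, needed even on a
 * uniprocessor when ordering accesses against a device (e.g. MMIO).
 * The smp_*() variants only order CPU-vs-CPU accesses.
 */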
#if defined(__i386__)
/*
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be
 * a nop for these.
 */
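/*
 * A LOCK-prefixed read-modify-write that leaves the top of the stack
 * unchanged: locked instructions are full memory barriers on x86, and
 * this works on CPUs that predate the SSE/SSE2 fence instructions
 * (sfence/lfence/mfence).
 */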
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#elif defined(__x86_64__)
#define mb() asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
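/*
 * x86 is TSO: loads are not reordered with other loads, and stores are
 * not reordered with other stores, so the SMP read/write barriers only
 * need to stop the compiler from reordering.
 */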
#define smp_rmb() barrier()
#define smp_wmb() barrier()
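/*
 * A locked op on a dummy stack slot is a full barrier and is cheaper
 * than mfence; the -132 offset keeps the access below the 128-byte
 * red zone, so live data in leaf functions is left untouched.
 */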
#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#endif

#if defined(__x86_64__)
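/*
 * Under TSO a plain store already has release semantics and a plain
 * load already has acquire semantics, so a compiler barrier is all
 * that is needed; READ_ONCE()/WRITE_ONCE() keep the access itself a
 * single, untorn memory operation.
 */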
#define smp_store_release(p, v)			\
do {						\
	barrier();				\
	WRITE_ONCE(*p, v);			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	barrier();				\
	___p1;					\
})
#endif /* defined(__x86_64__) */
#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
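A minimal, self-contained sketch of how the acquire/release pair above is meant to be used for a single-producer, single-consumer handoff. The producer/consumer threads, the payload/ready variables, and the stand-in definitions of barrier(), READ_ONCE() and WRITE_ONCE() (normally supplied by the tools/include headers) are illustrative assumptions, not part of the file.

#include <pthread.h>
#include <stdio.h>

/* Minimal stand-ins for helpers the tools headers normally provide. */
#define barrier() asm volatile("" ::: "memory")
#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

/* Copies of the x86_64 definitions from the header above. */
#define smp_store_release(p, v)			\
do {						\
	barrier();				\
	WRITE_ONCE(*p, v);			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	barrier();				\
	___p1;					\
})

static int payload;	/* data being handed off (hypothetical) */
static int ready;	/* publication flag (hypothetical) */

static void *producer(void *arg)
{
	payload = 42;			/* plain store of the data */
	smp_store_release(&ready, 1);	/* publish: data visible before flag */
	return NULL;
}

static void *consumer(void *arg)
{
	while (!smp_load_acquire(&ready))	/* spin until published */
		;
	printf("payload = %d\n", payload);	/* pairing guarantees 42 */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

Build with gcc -pthread; the store to payload is ordered before the flag store, and the flag load is ordered before the payload load, so the consumer can never observe ready == 1 with a stale payload.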