[mirror_ubuntu-bionic-kernel.git] arch/x86/include/asm/barrier.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
                                      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
                                       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
                                       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
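
/*
 * Usage sketch (editorial example; 'desc', 'dev' and their fields are
 * hypothetical): wmb() orders the descriptor fill before the MMIO
 * doorbell write that tells the device to look at it, which matters
 * even on UP:
 *
 *	desc->addr = dma_addr;		fill the DMA descriptor
 *	desc->len  = len;
 *	wmb();				publish it before the doorbell
 *	writel(1, dev->doorbell);	then kick the device
 */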

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
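
/*
 * Usage sketch (editorial example; the descriptor layout and DESC_DD
 * bit are made up): dma_rmb() keeps the payload read from being
 * reordered before the ownership check on a descriptor the device
 * writes via DMA:
 *
 *	if (desc->status & DESC_DD) {	device has handed it back
 *		dma_rmb();		read status before data
 *		len = desc->len;
 *	}
 */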

/*
 * A LOCK-prefixed instruction is a full barrier on x86, and a dummy
 * "lock; addl $0" to a dead slot just below the stack pointer is
 * cheaper than MFENCE on most CPUs, so use it for __smp_mb().
 */
#ifdef CONFIG_X86_32
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
/* xchg with a memory operand has an implicit LOCK prefix */
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
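
/*
 * Usage sketch (editorial example; flag0/flag1 are hypothetical): the
 * store->load ordering that TSO does not give for free is exactly what
 * the full barrier in __smp_store_mb() provides, e.g. in a
 * Dekker-style handshake:
 *
 *	CPU 0				CPU 1
 *	smp_store_mb(flag0, 1);		smp_store_mb(flag1, 1);
 *	r0 = READ_ONCE(flag1);		r1 = READ_ONCE(flag0);
 *
 * The full barrier makes "r0 == 0 && r1 == 0" impossible.
 */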

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * With this option we cannot rely on x86's usual strong TSO memory
 * model, so fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
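
/*
 * Usage sketch (editorial example; 'data' and 'ready' are hypothetical,
 * and smp_store_release()/smp_load_acquire() are the asm-generic
 * wrappers built from the __smp_* definitions above). On TSO the pair
 * publishes data with no fence instruction at all:
 *
 *	producer:
 *		data = 42;
 *		smp_store_release(&ready, 1);
 *
 *	consumer:
 *		while (!smp_load_acquire(&ready))
 *			cpu_relax();
 *		r = data;	guaranteed to observe 42
 */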

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
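
/*
 * Usage sketch (editorial example; 'seq' and 'waiters' are
 * hypothetical): a LOCK-prefixed RMW such as atomic_inc() already acts
 * as a full barrier on x86, so the upgrade macro costs nothing here:
 *
 *	atomic_inc(&seq);
 *	smp_mb__after_atomic();		compiles to barrier() on x86
 *	w = READ_ONCE(waiters);		cannot move before the inc
 */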

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */