/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
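
/*
 * In the 32-bit variants above, CPUs without SSE2 (X86_FEATURE_XMM2) lack
 * the MFENCE/LFENCE/SFENCE instructions, so ALTERNATIVE() falls back to a
 * LOCKed add to the top of the stack, which is a full memory barrier;
 * alternatives patching replaces it with the lighter fence at boot on
 * CPUs that support one.
 */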
26 | ||
f05e798a | 27 | #ifdef CONFIG_X86_PPRO_FENCE |
1077fa36 | 28 | #define dma_rmb() rmb() |
f05e798a | 29 | #else |
1077fa36 | 30 | #define dma_rmb() barrier() |
f05e798a | 31 | #endif |
1077fa36 AD |
32 | #define dma_wmb() barrier() |
33 | ||

#define __smp_mb()	mb()
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
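
/*
 * __smp_store_mb() uses xchg() rather than a store followed by mb():
 * XCHG with a memory operand is implicitly LOCKed on x86, so a single
 * instruction performs both the store and a full memory barrier.
 */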

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
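
/*
 * "Serializing" refers to the LOCK prefix (explicit, or implied as with
 * XCHG) on x86 atomic read-modify-write instructions: a LOCKed operation
 * is a full memory barrier, so only the compiler needs to be constrained
 * around atomic ops.
 */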

#include <asm-generic/barrier.h>
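
/*
 * The generic header maps the public smp_*() interfaces (smp_mb(),
 * smp_store_release(), ...) onto the __smp_*() definitions above,
 * demoting them to compiler barriers on !CONFIG_SMP, and provides
 * defaults for any barrier not defined in this file.
 */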
85 | ||
f05e798a | 86 | #endif /* _ASM_X86_BARRIER_H */ |