/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores. Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
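
/*
 * Illustrative sketch (not part of this header; ring, tail and
 * doorbell are hypothetical names): because the non-smp_ variants
 * expand to sync, they order cacheable and non-cacheable accesses
 * alike, so a driver could rely on wmb() to push descriptor stores
 * out before the MMIO store that kicks the device:
 *
 *	ring[tail].addr = addr;		stores to cacheable memory
 *	ring[tail].len  = len;
 *	wmb();				order them before the MMIO store
 *	out_be32(doorbell, tail);	non-cacheable store to the device
 */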

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb()	__lwsync()
#define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
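
/*
 * Illustrative sketch, in the spirit of the descriptor example in
 * Documentation/memory-barriers.txt (desc, DEVICE_OWN and new_data are
 * hypothetical): dma_rmb() keeps the data read after the ownership
 * check, and dma_wmb() publishes the data before handing the
 * descriptor back, without paying for a full sync.
 *
 *	if (READ_ONCE(desc->status) != DEVICE_OWN) {
 *		dma_rmb();			own it before reading
 *		data = desc->data;
 *		desc->data = new_data;
 *		dma_wmb();			flush data before status
 *		WRITE_ONCE(desc->status, DEVICE_OWN);
 *	}
 */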

#define __smp_lwsync()	__lwsync()

#define __smp_mb()	mb()
#define __smp_rmb()	__lwsync()
#define __smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
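
/*
 * Illustrative sketch (buf and flag are hypothetical): the classic
 * pairing of smp_wmb() on the producer with smp_rmb() on the consumer,
 * which lwsync/eieio can satisfy because only cacheable memory is
 * involved.
 *
 *	CPU 0				CPU 1
 *	buf = 1;			while (!READ_ONCE(flag))
 *	smp_wmb();				;
 *	WRITE_ONCE(flag, 1);		smp_rmb();
 *					r = buf;	guaranteed to see 1
 */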

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known. For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
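
/*
 * Illustrative sketch (seq and table are hypothetical): per the
 * comment above, the table read below cannot start until the load
 * of seq has actually completed.
 *
 *	idx = READ_ONCE(seq);
 *	data_barrier(idx);
 *	val = table[idx];
 */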

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	___p1;								\
})
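
/*
 * Illustrative sketch (msg and ready are hypothetical): message
 * passing with the release/acquire macros above, costing a single
 * lwsync on each side rather than a full sync.
 *
 *	CPU 0					CPU 1
 *	msg = 42;				while (!smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);			;
 *						r = msg;	guaranteed to see 42
 */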

#include <asm-generic/barrier.h>

#endif /* _ASM_POWERPC_BARRIER_H */