/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another.  sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only.  We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores.  Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
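
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): a typical use of the full wmb() is ordering descriptor
 * stores in normal memory before an MMIO doorbell write, which only
 * sync can do.  The names desc, doorbell and DOORBELL_KICK are
 * hypothetical.
 *
 *	desc->addr = buf_phys;
 *	desc->len  = len;
 *	wmb();				// sync: descriptor stores complete first
 *	out_be32(doorbell, DOORBELL_KICK);
 */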

#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
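
/*
 * Hedged sketch (added for illustration): smp_store_mb() suits
 * store-then-check sequences such as sleep/wakeup, where the store
 * must be ordered before a later load on the same CPU.  The variables
 * want_sleep and wake_pending are hypothetical.
 *
 *	smp_store_mb(want_sleep, 1);	// store, then full barrier
 *	if (!READ_ONCE(wake_pending))
 *		schedule();
 */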

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb()	__lwsync()
#define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
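
/*
 * Hedged sketch (added for illustration): dma_rmb()/dma_wmb() order
 * accesses to coherent DMA memory shared with a device.  The
 * descriptor layout and flag names below are hypothetical.
 *
 *	if (desc->status & DESC_OWNED_BY_CPU) {
 *		dma_rmb();		// read status before reading data
 *		process(desc->data);
 *		desc->data = new_data;
 *		dma_wmb();		// write data before handing ownership back
 *		desc->status = DESC_OWNED_BY_DEVICE;
 *	}
 */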

#ifdef CONFIG_SMP
#define smp_lwsync()	__lwsync()

#define smp_mb()	mb()
#define smp_rmb()	__lwsync()
#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#else
#define smp_lwsync()	barrier()

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif /* CONFIG_SMP */

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
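
/*
 * Hedged sketch (added for illustration): one plausible use of
 * data_barrier() is to stall execution until a load from a device
 * register has actually completed before acting on it.  regs, its
 * status field and STATUS_READY are hypothetical.
 *
 *	u32 status = in_be32(&regs->status);
 *	data_barrier(status);	// twi/isync: wait for the load to finish
 *	if (status & STATUS_READY)
 *		...
 */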

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	smp_lwsync();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	smp_lwsync();							\
	___p1;								\
})
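
/*
 * Hedged sketch (added for illustration): the classic message-passing
 * pattern these helpers support.  data and flag are hypothetical
 * shared variables, both initially zero.
 *
 *	CPU 0					CPU 1
 *	data = 42;				if (smp_load_acquire(&flag))
 *	smp_store_release(&flag, 1);			r = data;	// r == 42
 */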

#define smp_mb__before_atomic()     smp_mb()
#define smp_mb__after_atomic()      smp_mb()
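
/*
 * Hedged sketch (added for illustration): these upgrade a
 * non-value-returning atomic op to a full barrier on either side,
 * e.g. ordering an atomic_inc() before a later flag load.  refs and
 * flag are hypothetical.
 *
 *	atomic_inc(&refs);
 *	smp_mb__after_atomic();		// order the inc before the read below
 *	if (READ_ONCE(flag))
 *		...
 */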

#endif /* _ASM_POWERPC_BARRIER_H */