/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()
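
/*
 * Illustrative sketch, not part of the original header: the classic
 * producer/consumer pairing of wmb() and rmb() described above.  The
 * names "data", "ready", "producer" and "consumer" are hypothetical.
 *
 *	static int data, ready;
 *
 *	static void producer(void)
 *	{
 *		data = 42;
 *		wmb();			// store to data visible before ready
 *		ready = 1;
 *	}
 *
 *	static void consumer(void)
 *	{
 *		while (!ready)
 *			cpu_relax();
 *		rmb();			// read of ready completes before data
 *		BUG_ON(data != 42);	// guaranteed to observe the store
 *	}
 */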

#define dma_rmb()	mb()
#define dma_wmb()	mb()
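
/*
 * Illustrative sketch, not part of the original header: dma_wmb()
 * ordering descriptor fields against the write that hands ownership
 * to the device.  "struct desc", "DEVICE_OWNED" and "post_desc" are
 * hypothetical names.
 *
 *	struct desc {
 *		u64 addr;
 *		u32 len;
 *		u32 owner;		// polled by the device
 *	};
 *
 *	static void post_desc(struct desc *d, u64 addr, u32 len)
 *	{
 *		d->addr = addr;
 *		d->len = len;
 *		dma_wmb();		// body visible before ownership flip
 *		d->owner = DEVICE_OWNED;
 *	}
 */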

# define __smp_mb()	mb()

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
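
/*
 * Illustrative sketch, not part of the original header: these pair
 * with non-value-returning atomics, which are otherwise unordered.
 * "foo" and its "refs" field are hypothetical.
 *
 *	smp_mb__before_atomic();	// order earlier accesses ...
 *	atomic_inc(&foo->refs);		// ... against the atomic op
 *	smp_mb__after_atomic();		// ... and later accesses too
 */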

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so there is no need for asm trickery here!
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

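/*
 * Illustrative sketch, not part of the original header: the generic
 * smp_store_release()/smp_load_acquire() wrappers built on the macros
 * above give one-way message passing.  "msg", "flag", "sender" and
 * "receiver" are hypothetical names.
 *
 *	static int msg, flag;
 *
 *	static void sender(void)
 *	{
 *		msg = 42;
 *		smp_store_release(&flag, 1);	// orders msg before flag
 *	}
 *
 *	static int receiver(void)
 *	{
 *		while (!smp_load_acquire(&flag))
 *			cpu_relax();		// orders flag before msg
 *		return msg;			// guaranteed to return 42
 *	}
 */
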
/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */
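
/*
 * Illustrative only, and hypothetical: the rsm/ssm wrappers this note
 * refers to live elsewhere.  A stop (";;") closes the current IA-64
 * instruction group, so such a wrapper would look roughly like
 *
 *	asm volatile (";; rsm psr.i" ::: "memory");	// sketch only
 */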

#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */