/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BARRIER "bcr 14,0\n"
#else
/* Serializing BCR with checkpoint synchronization on pre-z196 machines */
#define __ASM_BARRIER "bcr 15,0\n"
#endif

/* Full memory barrier: serializes the CPU and clobbers compiler memory state */
#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)

/*
 * rmb()/wmb() are compiler-only barriers here (no machine instruction is
 * emitted); presumably the architecture's ordering guarantees make the
 * hardware barrier unnecessary for plain read/write ordering — the full
 * serializing mb() is reserved for the DMA variants below.
 */
#define rmb() barrier()
#define wmb() barrier()
#define dma_rmb() mb()
#define dma_wmb() mb()
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()

/* No address-dependency barriers are needed; these compile to nothing. */
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)

/* Full barrier around atomic ops; maps straight to smp_mb() */
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()

/* Store @value to @var, then issue a full memory barrier */
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)

/*
 * Release store: the barrier() before the WRITE_ONCE() keeps the compiler
 * from reordering prior accesses past the store. Only valid on types the
 * hardware stores atomically (enforced by compiletime_assert_atomic_type).
 */
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

/*
 * Acquire load: the barrier() after the READ_ONCE() keeps the compiler
 * from hoisting subsequent accesses above the load.
 */
#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif /* __ASM_BARRIER_H */