/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <linux/irqflags.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
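
/*
 * Illustrative sketch (not part of the original header): how ATOMIC_HASH()
 * maps addresses to locks.  Assuming L1_CACHE_BYTES == 32 purely for the
 * arithmetic (the real value is architecture-specific):
 *
 *	address 0x1000: (0x1000 / 32) & 3 == 0  ->  __atomic_hash[0]
 *	address 0x101c: (0x101c / 32) & 3 == 0  ->  same lock (same cacheline)
 *	address 0x1020: (0x1020 / 32) & 3 == 1  ->  __atomic_hash[1]
 *
 * Words sharing a cacheline therefore serialize on one lock, while words
 * in different cachelines usually proceed in parallel.
 */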

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
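
/*
 * Illustrative sketch (not part of the original header): the pattern every
 * bitop below follows.  "word" and the modify step are placeholders.
 *
 *	unsigned long flags;
 *
 *	_atomic_spin_lock_irqsave(word, flags);
 *	... read-modify-write of *word ...
 *	_atomic_spin_unlock_irqrestore(word, flags);
 *
 * Note the unlock order: the lock is dropped before interrupts are
 * restored, keeping the lock hold time as short as possible.
 */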

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
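
/*
 * Illustrative sketch (not part of the original header): atomically
 * recording that a device is present in a driver-private bitmap.  The
 * names dev_present, mark_device_present and dev_id are hypothetical.
 *
 *	static unsigned long dev_present[2];
 *
 *	void mark_device_present(int dev_id)
 *	{
 *		set_bit(dev_id, dev_present);
 *	}
 */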

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
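
/*
 * Illustrative sketch (not part of the original header): releasing a
 * "pending" flag after the data it guards has been written.  Because
 * clear_bit() has no barrier of its own, smp_mb__before_atomic() orders
 * the data store before the flag clear.  PENDING_BIT, state, result and
 * compute() are hypothetical.
 *
 *	result = compute();
 *	smp_mb__before_atomic();
 *	clear_bit(PENDING_BIT, &state);
 */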

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.  It may, however,
 * be reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
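
/*
 * Illustrative sketch (not part of the original header): toggling a state
 * bit that several contexts may flip concurrently.  led_state, led_toggle
 * and LED_ON_BIT are hypothetical.
 *
 *	static unsigned long led_state;
 *
 *	void led_toggle(void)
 *	{
 *		change_bit(LED_ON_BIT, &led_state);
 *	}
 */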

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may, however, be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
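
/*
 * Illustrative sketch (not part of the original header): the classic use
 * of test_and_set_bit() as a trylock.  example_trylock(), example_unlock()
 * and LOCK_BIT are hypothetical names.
 *
 *	static int example_trylock(unsigned long *word)
 *	{
 *		return !test_and_set_bit(LOCK_BIT, word);
 *	}
 *
 *	static void example_unlock(unsigned long *word)
 *	{
 *		smp_mb__before_atomic();
 *		clear_bit(LOCK_BIT, word);
 *	}
 *
 * example_trylock() returns 1 when the caller took the lock; the memory
 * barrier implied by test_and_set_bit() makes the acquire safe.
 */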

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may, however, be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
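
/*
 * Illustrative sketch (not part of the original header): consuming a
 * "work pending" flag exactly once even when several CPUs race to
 * process it.  WORK_BIT, pending and do_work() are hypothetical.
 *
 *	if (test_and_clear_bit(WORK_BIT, &pending))
 *		do_work();
 *
 * Only one of the racing CPUs observes the bit as previously set, so
 * do_work() runs once per flag raise.
 */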

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
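
/*
 * Illustrative sketch (not part of the original header): flipping a
 * parity bit and acting on its previous value.  PARITY_BIT and parity
 * are hypothetical.
 *
 *	int was_odd = test_and_change_bit(PARITY_BIT, &parity);
 */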

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */