/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

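/*
 * Illustrative sketch only, not part of the generated code: ATOMIC_OP(add, +=, add)
 * below produces an atomic_add() whose inline assembly behaves roughly like
 *
 *	do {
 *		old = load_exclusive(&v->counter);
 *		new = old + i;
 *	} while (store_exclusive(&v->counter, new) != 0);
 *
 * where load_exclusive()/store_exclusive() are pseudocode stand-ins for the
 * ldrex/strex instructions; the store fails and the loop retries whenever the
 * exclusive monitor was lost between the load and the store.
 */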
#define ATOMIC_OP(op, c_op, asm_op)	\
static inline void atomic_##op(int i, atomic_t *v)	\
{	\
	unsigned long tmp;	\
	int result;	\
	\
	prefetchw(&v->counter);	\
	__asm__ __volatile__("@ atomic_" #op "\n"	\
"1:	ldrex %0, [%3]\n"	\
"	" #asm_op " %0, %0, %4\n"	\
"	strex %1, %0, [%3]\n"	\
"	teq %1, #0\n"	\
"	bne 1b"	\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)	\
	: "cc");	\
}	\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{	\
	unsigned long tmp;	\
	int result;	\
	\
	prefetchw(&v->counter);	\
	\
	__asm__ __volatile__("@ atomic_" #op "_return\n"	\
"1:	ldrex %0, [%3]\n"	\
"	" #asm_op " %0, %0, %4\n"	\
"	strex %1, %0, [%3]\n"	\
"	teq %1, #0\n"	\
"	bne 1b"	\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)	\
	: "cc");	\
	\
	return result;	\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)	\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{	\
	unsigned long tmp;	\
	int result, val;	\
	\
	prefetchw(&v->counter);	\
	\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"	\
"1:	ldrex %0, [%4]\n"	\
"	" #asm_op " %1, %0, %5\n"	\
"	strex %2, %1, [%4]\n"	\
"	teq %2, #0\n"	\
"	bne 1b"	\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)	\
	: "cc");	\
	\
	return result;	\
}

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
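
/*
 * Only the _relaxed forms are defined here; the fully ordered, _acquire and
 * _release variants are built from them by the generic atomic wrappers in
 * <linux/atomic.h>, which add the required barriers.
 */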

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
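
/*
 * Illustrative sketch, not part of this header: a typical retry loop built
 * on atomic_cmpxchg_relaxed(), where transform() is a placeholder for the
 * caller's update:
 *
 *	old = atomic_read(v);
 *	for (;;) {
 *		new = transform(old);
 *		ret = atomic_cmpxchg_relaxed(v, old, new);
 *		if (ret == old)
 *			break;
 *		old = ret;
 *	}
 */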

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic_fetch_add_unless		atomic_fetch_add_unless
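
/*
 * Semantics reminder: atomic_fetch_add_unless(v, a, u) adds 'a' to *v unless
 * *v was already equal to 'u', and returns the value *v held beforehand. The
 * common pattern atomic_fetch_add_unless(v, 1, 0) is "take a reference unless
 * the count has already dropped to zero".
 */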

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

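/*
 * Pre-ARMv6 cores lack ldrex/strex, so these fallbacks rely on the kernel
 * being uniprocessor (enforced by the #error above): briefly disabling
 * interrupts around the read-modify-write is enough to make it atomic with
 * respect to everything else running on the CPU.
 */
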
#define ATOMIC_OP(op, c_op, asm_op)	\
static inline void atomic_##op(int i, atomic_t *v)	\
{	\
	unsigned long flags;	\
	\
	raw_local_irq_save(flags);	\
	v->counter c_op i;	\
	raw_local_irq_restore(flags);	\
}	\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{	\
	unsigned long flags;	\
	int val;	\
	\
	raw_local_irq_save(flags);	\
	v->counter c_op i;	\
	val = v->counter;	\
	raw_local_irq_restore(flags);	\
	\
	return val;	\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)	\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{	\
	unsigned long flags;	\
	int val;	\
	\
	raw_local_irq_save(flags);	\
	val = v->counter;	\
	v->counter c_op i;	\
	raw_local_irq_restore(flags);	\
	\
	return val;	\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define atomic_fetch_andnot		atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)	\
	ATOMIC_OP(op, c_op, asm_op)	\
	ATOMIC_OP_RETURN(op, c_op, asm_op)	\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
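
/*
 * The two ATOMIC_OPS() invocations above instantiate atomic_add()/atomic_sub(),
 * atomic_add_return*()/atomic_sub_return*() and atomic_fetch_add*()/
 * atomic_fetch_sub*() from whichever ATOMIC_OP/ATOMIC_OP_RETURN/ATOMIC_FETCH_OP
 * definitions (LL/SC or IRQ-disable) were selected above.
 */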

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)	\
	ATOMIC_OP(op, c_op, asm_op)	\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)
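
/*
 * For the bitwise ops, ATOMIC_OPS() is redefined without ATOMIC_OP_RETURN:
 * only the void atomic_and/andnot/or/xor() and their atomic_fetch_*() forms
 * are provided; there are no *_return variants for these operations.
 */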

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

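/*
 * 64-bit accessors: with CONFIG_ARM_LPAE the ldrd/strd instructions are
 * architecturally single-copy atomic for naturally aligned doublewords, so a
 * plain load/store suffices for atomic64_read()/atomic64_set(). Without LPAE
 * that guarantee does not exist, so the ldrexd/strexd exclusives are used
 * instead.
 */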
#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

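/*
 * Note on the inline-asm operand modifiers used below (GCC/Clang ARM
 * conventions): %Q prints the register holding the least significant word of
 * a 64-bit operand, %R the most significant word, and %H the higher-numbered
 * register of the pair, as required by ldrexd/strexd.
 */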
#define ATOMIC64_OP(op, op1, op2)	\
static inline void atomic64_##op(long long i, atomic64_t *v)	\
{	\
	long long result;	\
	unsigned long tmp;	\
	\
	prefetchw(&v->counter);	\
	__asm__ __volatile__("@ atomic64_" #op "\n"	\
"1:	ldrexd	%0, %H0, [%3]\n"	\
"	" #op1 " %Q0, %Q0, %Q4\n"	\
"	" #op2 " %R0, %R0, %R4\n"	\
"	strexd	%1, %0, %H0, [%3]\n"	\
"	teq	%1, #0\n"	\
"	bne	1b"	\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)	\
	: "cc");	\
}	\

#define ATOMIC64_OP_RETURN(op, op1, op2)	\
static inline long long	\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)	\
{	\
	long long result;	\
	unsigned long tmp;	\
	\
	prefetchw(&v->counter);	\
	\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"	\
"1:	ldrexd	%0, %H0, [%3]\n"	\
"	" #op1 " %Q0, %Q0, %Q4\n"	\
"	" #op2 " %R0, %R0, %R4\n"	\
"	strexd	%1, %0, %H0, [%3]\n"	\
"	teq	%1, #0\n"	\
"	bne	1b"	\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)	\
	: "cc");	\
	\
	return result;	\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)	\
static inline long long	\
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)	\
{	\
	long long result, val;	\
	unsigned long tmp;	\
	\
	prefetchw(&v->counter);	\
	\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"	\
"1:	ldrexd	%0, %H0, [%4]\n"	\
"	" #op1 " %Q1, %Q0, %Q5\n"	\
"	" #op2 " %R1, %R0, %R5\n"	\
"	strexd	%2, %1, %H1, [%4]\n"	\
"	teq	%2, #0\n"	\
"	bne	1b"	\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)	\
	: "cc");	\
	\
	return result;	\
}

#define ATOMIC64_OPS(op, op1, op2)	\
	ATOMIC64_OP(op, op1, op2)	\
	ATOMIC64_OP_RETURN(op, op1, op2)	\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
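
/*
 * The (op1, op2) pairs above act on the low and high words respectively:
 * adds/adc and subs/sbc let the carry (or borrow) from the low 32-bit half
 * propagate into the high half, giving a full 64-bit add/subtract.
 */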

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)	\
	ATOMIC64_OP(op, op1, op2)	\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
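
/*
 * atomic64_dec_if_positive() returns the decremented value in all cases, but
 * only writes it back when the result is not negative; a negative return
 * therefore means the counter was left untouched.
 */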

static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
						  long long u)
{
	long long oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */