/*
 * Reconstructed from a git-blame web export of
 * arch/x86/include/asm/atomic64_64.h
 * (last commit subject: "x86/atomic: Fix smp_mb__{before,after}_atomic()").
 */
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1a3b1d89
BG
2#ifndef _ASM_X86_ATOMIC64_64_H
3#define _ASM_X86_ATOMIC64_64_H
4
5#include <linux/types.h>
6#include <asm/alternative.h>
7#include <asm/cmpxchg.h>
8
9/* The 64-bit atomic type */
10
11#define ATOMIC64_INIT(i) { (i) }
12
13/**
8bf705d1 14 * arch_atomic64_read - read atomic64 variable
1a3b1d89
BG
15 * @v: pointer of type atomic64_t
16 *
17 * Atomically reads the value of @v.
18 * Doesn't imply a read memory barrier.
19 */
79c53a83 20static inline s64 arch_atomic64_read(const atomic64_t *v)
1a3b1d89 21{
62e8a325 22 return READ_ONCE((v)->counter);
1a3b1d89
BG
23}
24
25/**
8bf705d1 26 * arch_atomic64_set - set atomic64 variable
1a3b1d89
BG
27 * @v: pointer to type atomic64_t
28 * @i: required value
29 *
30 * Atomically sets the value of @v to @i.
31 */
79c53a83 32static inline void arch_atomic64_set(atomic64_t *v, s64 i)
1a3b1d89 33{
62e8a325 34 WRITE_ONCE(v->counter, i);
1a3b1d89
BG
35}
36
37/**
8bf705d1 38 * arch_atomic64_add - add integer to atomic64 variable
1a3b1d89
BG
39 * @i: integer value to add
40 * @v: pointer to type atomic64_t
41 *
42 * Atomically adds @i to @v.
43 */
79c53a83 44static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
1a3b1d89
BG
45{
46 asm volatile(LOCK_PREFIX "addq %1,%0"
47 : "=m" (v->counter)
69d927bb 48 : "er" (i), "m" (v->counter) : "memory");
1a3b1d89
BG
49}
50
51/**
8bf705d1 52 * arch_atomic64_sub - subtract the atomic64 variable
1a3b1d89
BG
53 * @i: integer value to subtract
54 * @v: pointer to type atomic64_t
55 *
56 * Atomically subtracts @i from @v.
57 */
79c53a83 58static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
1a3b1d89
BG
59{
60 asm volatile(LOCK_PREFIX "subq %1,%0"
61 : "=m" (v->counter)
69d927bb 62 : "er" (i), "m" (v->counter) : "memory");
1a3b1d89
BG
63}
64
65/**
8bf705d1 66 * arch_atomic64_sub_and_test - subtract value from variable and test result
1a3b1d89
BG
67 * @i: integer value to subtract
68 * @v: pointer to type atomic64_t
69 *
70 * Atomically subtracts @i from @v and returns
71 * true if the result is zero, or false for all
72 * other cases.
73 */
79c53a83 74static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
1a3b1d89 75{
288e4521 76 return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
1a3b1d89 77}
4331f4d5 78#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
1a3b1d89
BG
79
80/**
8bf705d1 81 * arch_atomic64_inc - increment atomic64 variable
1a3b1d89
BG
82 * @v: pointer to type atomic64_t
83 *
84 * Atomically increments @v by 1.
85 */
8bf705d1 86static __always_inline void arch_atomic64_inc(atomic64_t *v)
1a3b1d89
BG
87{
88 asm volatile(LOCK_PREFIX "incq %0"
89 : "=m" (v->counter)
69d927bb 90 : "m" (v->counter) : "memory");
1a3b1d89 91}
4331f4d5 92#define arch_atomic64_inc arch_atomic64_inc
1a3b1d89
BG
93
94/**
8bf705d1 95 * arch_atomic64_dec - decrement atomic64 variable
1a3b1d89
BG
96 * @v: pointer to type atomic64_t
97 *
98 * Atomically decrements @v by 1.
99 */
8bf705d1 100static __always_inline void arch_atomic64_dec(atomic64_t *v)
1a3b1d89
BG
101{
102 asm volatile(LOCK_PREFIX "decq %0"
103 : "=m" (v->counter)
69d927bb 104 : "m" (v->counter) : "memory");
1a3b1d89 105}
4331f4d5 106#define arch_atomic64_dec arch_atomic64_dec
1a3b1d89
BG
107
108/**
8bf705d1 109 * arch_atomic64_dec_and_test - decrement and test
1a3b1d89
BG
110 * @v: pointer to type atomic64_t
111 *
112 * Atomically decrements @v by 1 and
113 * returns true if the result is 0, or false for all other
114 * cases.
115 */
8bf705d1 116static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
1a3b1d89 117{
288e4521 118 return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
1a3b1d89 119}
4331f4d5 120#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
1a3b1d89
BG
121
122/**
8bf705d1 123 * arch_atomic64_inc_and_test - increment and test
1a3b1d89
BG
124 * @v: pointer to type atomic64_t
125 *
126 * Atomically increments @v by 1
127 * and returns true if the result is zero, or false for all
128 * other cases.
129 */
8bf705d1 130static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
1a3b1d89 131{
288e4521 132 return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
1a3b1d89 133}
4331f4d5 134#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
1a3b1d89
BG
135
136/**
8bf705d1 137 * arch_atomic64_add_negative - add and test if negative
1a3b1d89
BG
138 * @i: integer value to add
139 * @v: pointer to type atomic64_t
140 *
141 * Atomically adds @i to @v and returns true
142 * if the result is negative, or false when
143 * result is greater than or equal to zero.
144 */
79c53a83 145static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
1a3b1d89 146{
288e4521 147 return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
1a3b1d89 148}
4331f4d5 149#define arch_atomic64_add_negative arch_atomic64_add_negative
1a3b1d89
BG
150
151/**
8bf705d1 152 * arch_atomic64_add_return - add and return
1a3b1d89
BG
153 * @i: integer value to add
154 * @v: pointer to type atomic64_t
155 *
156 * Atomically adds @i to @v and returns @i + @v
157 */
79c53a83 158static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
1a3b1d89 159{
8b8bc2f7 160 return i + xadd(&v->counter, i);
1a3b1d89
BG
161}
162
79c53a83 163static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
1a3b1d89 164{
8bf705d1 165 return arch_atomic64_add_return(-i, v);
1a3b1d89
BG
166}
167
79c53a83 168static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
a8bcccab
PZ
169{
170 return xadd(&v->counter, i);
171}
172
79c53a83 173static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
a8bcccab
PZ
174{
175 return xadd(&v->counter, -i);
176}
177
79c53a83 178static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
1a3b1d89 179{
8bf705d1 180 return arch_cmpxchg(&v->counter, old, new);
1a3b1d89
BG
181}
182
8bf705d1 183#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
79c53a83 184static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
a9ebf306
PZ
185{
186 return try_cmpxchg(&v->counter, old, new);
187}
188
79c53a83 189static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
1a3b1d89 190{
f9881cc4 191 return arch_xchg(&v->counter, new);
1a3b1d89
BG
192}
193
79c53a83 194static inline void arch_atomic64_and(s64 i, atomic64_t *v)
ba1c9f83
DV
195{
196 asm volatile(LOCK_PREFIX "andq %1,%0"
197 : "+m" (v->counter)
198 : "er" (i)
199 : "memory");
7fc1845d
PZ
200}
201
79c53a83 202static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
ba1c9f83 203{
8bf705d1 204 s64 val = arch_atomic64_read(v);
ba1c9f83
DV
205
206 do {
8bf705d1 207 } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
ba1c9f83 208 return val;
a8bcccab
PZ
209}
210
79c53a83 211static inline void arch_atomic64_or(s64 i, atomic64_t *v)
ba1c9f83
DV
212{
213 asm volatile(LOCK_PREFIX "orq %1,%0"
214 : "+m" (v->counter)
215 : "er" (i)
216 : "memory");
217}
a8bcccab 218
79c53a83 219static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
ba1c9f83 220{
8bf705d1 221 s64 val = arch_atomic64_read(v);
7fc1845d 222
ba1c9f83 223 do {
8bf705d1 224 } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
ba1c9f83
DV
225 return val;
226}
227
79c53a83 228static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
ba1c9f83
DV
229{
230 asm volatile(LOCK_PREFIX "xorq %1,%0"
231 : "+m" (v->counter)
232 : "er" (i)
233 : "memory");
234}
235
79c53a83 236static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
ba1c9f83 237{
8bf705d1 238 s64 val = arch_atomic64_read(v);
ba1c9f83
DV
239
240 do {
8bf705d1 241 } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
ba1c9f83
DV
242 return val;
243}
7fc1845d 244
1a3b1d89 245#endif /* _ASM_X86_ATOMIC64_64_H */