/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif
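
/*
 * Every operation in this file is emitted through ARM64_LSE_ATOMIC_INSN,
 * which uses the alternatives framework to patch, at boot, between a call
 * to the out-of-line LL/SC fallback and the inline LSE instruction(s).
 * __nops(n) pads the LL/SC side so both alternatives occupy the same
 * number of instructions. The explicit register variables (w0/x0, x1, ...)
 * pin operands to the registers the out-of-line LL/SC routines expect,
 * and __LL_SC_CLOBBERS names the registers those routines may trash.
 */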

#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)

#define ATOMIC_OP(op, asm_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),		\
"	" #asm_op "	%w[i], %[v]\n")					\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP
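
/*
 * For example, ATOMIC_OP(add, stadd) above expands (roughly) to:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		register int w0 asm ("w0") = i;
 *		register atomic_t *x1 asm ("x1") = v;
 *
 *		asm volatile(ARM64_LSE_ATOMIC_INSN(
 *			__LL_SC_CALL(atomic_add),
 *		"	stadd	%w[i], %[v]\n")
 *		: [i] "+r" (w0), [v] "+Q" (v->counter)
 *		: "r" (x1)
 *		: __LL_SC_CLOBBERS);
 *	}
 *
 * i.e. a single STADD instruction on LSE-capable CPUs.
 */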

#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline int atomic_fetch_##op##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_##op##name),				\
	/* LSE atomics */						\
"	" #asm_op #mb "	%w[i], %w[i], %[v]")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS
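
/*
 * The 'mb' argument selects the ordering variant of the LSE instruction:
 * "" (relaxed), "a" (acquire), "l" (release) or "al" (fully ordered),
 * e.g. ldadd -> ldadda/ldaddl/ldaddal. Every ordered variant also passes
 * a "memory" clobber so the compiler cannot reorder memory accesses
 * around the asm; the relaxed variant deliberately omits it.
 */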

#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
static inline int atomic_add_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(add_return##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN
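
/*
 * LDADD writes the value the memory location held *before* the addition
 * into its second register, so the add_return variants need the extra
 * ADD to produce the new value. w30/x30 (the link register) is usable
 * as a scratch register here: it is already in __LL_SC_CLOBBERS, since
 * the LL/SC alternative clobbers it with the BL to the fallback routine.
 */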

static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(and)
	__nops(1),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}
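
/*
 * LSE provides no atomic AND: atomic_and() above and the fetch_and
 * variants below invert the operand with MVN and then use the atomic
 * bit-clear instructions (STCLR/LDCLR), since (x & i) == (x & ~(~i)).
 */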

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static inline int atomic_fetch_and##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_and##name)					\
	__nops(1),							\
	/* LSE atomics */						\
	"	mvn	%w[i], %w[i]\n"					\
	"	ldclr" #mb "	%w[i], %w[i], %[v]")			\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND
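
/*
 * Likewise there is no atomic subtract: atomic_sub() and friends negate
 * the operand (NEG) and reuse the atomic add instructions (STADD/LDADD).
 */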

static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(sub)
	__nops(1),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
static inline int atomic_sub_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(sub_return##name)				\
	__nops(2),							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
static inline int atomic_fetch_sub##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_sub##name)					\
	__nops(1),							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[i], %[v]")			\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC
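
/*
 * The atomic64_* operations below mirror the 32-bit ones above, with
 * 64-bit operands: x-registers instead of w-registers and the full-width
 * forms of the LSE instructions.
 */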

#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)

#define ATOMIC64_OP(op, asm_op)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),	\
"	" #asm_op "	%[i], %[v]\n")					\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_##op##name),				\
	/* LSE atomics */						\
"	" #asm_op #mb "	%[i], %[i], %[v]")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
static inline long atomic64_add_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(add_return##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(and)
	__nops(1),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static inline long atomic64_fetch_and##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_and##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	mvn	%[i], %[i]\n"					\
	"	ldclr" #mb "	%[i], %[i], %[v]")			\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(sub)
	__nops(1),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(sub_return##name)				\
	__nops(2),							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_sub##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %[i], %[v]")			\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB
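
/*
 * dec_if_positive has no direct LSE equivalent, so the LSE path below is
 * a CAS loop: load the counter, compute the decrement, bail out (to 2:)
 * if the result would be negative, then try to CASAL the new value in.
 * The two SUBs reduce x30 to zero iff the CAS succeeded (the value CAS
 * observed matches the value originally loaded); CBNZ retries from 1:
 * otherwise.
 */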

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(dec_if_positive)
	__nops(6),
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.lt	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+r" (x0), [v] "+Q" (v->counter)
	:
	: __LL_SC_CLOBBERS, "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64
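
/*
 * __cmpxchg_case_<name> maps sizes and orderings onto the CAS family:
 * 'sz' picks the byte/halfword/word/doubleword form and 'mb' the
 * acquire/release semantics. The old value is moved into w30/x30 first
 * because CAS overwrites its compare register with the value observed
 * in memory, which is what we return.
 */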

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
						  unsigned long old,	\
						  unsigned long new)	\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register unsigned long x1 asm ("x1") = old;			\
	register unsigned long x2 asm ("x2") = new;			\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_CMPXCHG(name)						\
	__nops(2),							\
	/* LSE atomics */						\
	"	mov	" #w "30, %" #w "[old]\n"			\
	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
	"	mov	%" #w "[ret], " #w "30")			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
	: [old] "r" (x1), [new] "r" (x2)				\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     1,   )
__CMPXCHG_CASE(w, h,     2,   )
__CMPXCHG_CASE(w,  ,     4,   )
__CMPXCHG_CASE(x,  ,     8,   )
__CMPXCHG_CASE(w, b, acq_1,  a, "memory")
__CMPXCHG_CASE(w, h, acq_2,  a, "memory")
__CMPXCHG_CASE(w,  , acq_4,  a, "memory")
__CMPXCHG_CASE(x,  , acq_8,  a, "memory")
__CMPXCHG_CASE(w, b, rel_1,  l, "memory")
__CMPXCHG_CASE(w, h, rel_2,  l, "memory")
__CMPXCHG_CASE(w,  , rel_4,  l, "memory")
__CMPXCHG_CASE(x,  , rel_8,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, al, "memory")
__CMPXCHG_CASE(w, h,  mb_2, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE
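
/*
 * __cmpxchg_double compares and swaps a pair of adjacent 64-bit words
 * with CASP. The two EORs and the ORR compute a non-zero value in x0
 * iff either observed word differed from the expected one, so the
 * function returns 0 on success, matching the LL/SC implementation.
 */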

#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

#define __CMPXCHG_DBL(name, mb, cl...)					\
static inline long __cmpxchg_double##name(unsigned long old1,		\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_CMPXCHG_DBL(name)					\
	__nops(3),							\
	/* LSE atomics */						\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]")			\
	: [old1] "+r" (x0), [old2] "+r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */