/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2001 Benno Rice
 * Copyright (c) 2001 David E. O'Brien
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 */

#ifndef _RTE_ATOMIC_PPC_64_H_
#define _RTE_ATOMIC_PPC_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include "generic/rte_atomic.h"

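/*
 * "sync" (hwsync) is the full barrier on Power: it orders all earlier
 * loads and stores before all later ones, so the read, write, and full
 * barriers below all map to the same instruction.
 */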
#define rte_mb() asm volatile("sync" : : : "memory")

#define rte_wmb() asm volatile("sync" : : : "memory")

#define rte_rmb() asm volatile("sync" : : : "memory")

#define rte_smp_mb() rte_mb()

#define rte_smp_wmb() rte_wmb()

#define rte_smp_rmb() rte_rmb()

#define rte_io_mb() rte_mb()

#define rte_io_wmb() rte_wmb()

#define rte_io_rmb() rte_rmb()

#define rte_cio_wmb() rte_wmb()

#define rte_cio_rmb() rte_rmb()

/*------------------------- 16 bit atomic operations -------------------------*/
/* To be compatible with Power7, use GCC built-in functions for 16-bit
 * operations.
 */

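/*
 * __atomic_compare_exchange(ptr, &expected, &desired, weak, success_order,
 * failure_order) is the GCC built-in; with weak == 0 it performs a strong
 * compare-and-swap that fails only when *ptr != expected.
 */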
#ifndef RTE_FORCE_INTRINSICS
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
	return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
		__ATOMIC_ACQUIRE) ? 1 : 0;
}

static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
}

/*------------------------- 32 bit atomic operations -------------------------*/
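
/*
 * The operations below rely on the lwarx/stwcx. load-reserve and
 * store-conditional pair: lwarx loads a word and sets a reservation,
 * stwcx. stores only if the reservation is still held and records the
 * outcome in CR0, and "bne- 1b" retries the loop on failure. Where
 * ordering matters, "lwsync" before the loop and "isync" after it act
 * as the release and acquire fences.
 */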

static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
	unsigned int ret = 0;

	asm volatile(
			"\tlwsync\n"
			"1:\tlwarx %[ret], 0, %[dst]\n"
			"cmplw %[exp], %[ret]\n"
			"bne 2f\n"
			"stwcx. %[src], 0, %[dst]\n"
			"bne- 1b\n"
			"li %[ret], 1\n"
			"b 3f\n"
			"2:\n"
			/* Store the just-loaded value back to clear the reservation. */
			"stwcx. %[ret], 0, %[dst]\n"
			"li %[ret], 0\n"
			"3:\n"
			"isync\n"
			: [ret] "=&r" (ret), "=m" (*dst)
			: [dst] "r" (dst),
			  [exp] "r" (exp),
			  [src] "r" (src),
			  "m" (*dst)
			: "cc", "memory");

	return ret;
}

static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
	return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
	int t;

	/* "addic" updates the carry bit, hence the "xer" clobber. */
	asm volatile(
			"1: lwarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],1\n"
			"stwcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
	int t;

	asm volatile(
			"1: lwarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],-1\n"
			"stwcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
	int ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: lwarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],1\n"
			"stwcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
	int ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: lwarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],-1\n"
			"stwcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
}

/*------------------------- 64 bit atomic operations -------------------------*/
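
/*
 * The 64-bit operations mirror the 32-bit ones, using the doubleword
 * ldarx/stdcx. reserve/conditional pair instead of lwarx/stwcx.
 */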

static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
	unsigned int ret = 0;

	asm volatile (
			"\tlwsync\n"
			"1: ldarx %[ret], 0, %[dst]\n"
			"cmpld %[exp], %[ret]\n"
			"bne 2f\n"
			"stdcx. %[src], 0, %[dst]\n"
			"bne- 1b\n"
			"li %[ret], 1\n"
			"b 3f\n"
			"2:\n"
			"stdcx. %[ret], 0, %[dst]\n"
			"li %[ret], 0\n"
			"3:\n"
			"isync\n"
			: [ret] "=&r" (ret), "=m" (*dst)
			: [dst] "r" (dst),
			  [exp] "r" (exp),
			  [src] "r" (src),
			  "m" (*dst)
			: "cc", "memory");
	return ret;
}

static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
	v->cnt = 0;
}

static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
	long ret;

	/* An aligned doubleword load is single-copy atomic on ppc64. */
	asm volatile("ld%U1%X1 %[ret],%[cnt]"
		: [ret] "=r"(ret)
		: [cnt] "m"(v->cnt));

	return ret;
}

static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
	asm volatile("std%U0%X0 %[new_value],%[cnt]"
		: [cnt] "=m"(v->cnt)
		: [new_value] "r"(new_value));
}

static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"add %[t],%[inc],%[t]\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
			: "cc", "memory");
}

static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"subf %[t],%[dec],%[t]\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "+m" (v->cnt)
			: [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
			: "cc", "memory");
}

static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],1\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "+m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],-1\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "+m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"add %[ret],%[inc],%[ret]\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [inc] "r" (inc), [cnt] "r" (&v->cnt)
			: "cc", "memory");

	return ret;
}

static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"subf %[ret],%[dec],%[ret]\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [dec] "r" (dec), [cnt] "r" (&v->cnt)
			: "cc", "memory");

	return ret;
}

static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],1\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],-1\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}

/**
 * Atomically set a 64-bit counter to 0.
 *
 * @param v
 *   A pointer to the atomic counter.
 */
static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
	v->cnt = 0;
}

static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
}
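
/*
 * Usage sketch (illustrative only; the names below are hypothetical):
 * a reference count built on the helpers above.
 *
 *	rte_atomic64_t refcnt;
 *
 *	rte_atomic64_init(&refcnt);
 *	rte_atomic64_inc(&refcnt);                // take a reference
 *	if (rte_atomic64_dec_and_test(&refcnt))   // drop a reference
 *		release_object();                 // hypothetical cleanup
 */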

#endif /* !RTE_FORCE_INTRINSICS */

#ifdef __cplusplus
}
#endif

#endif /* _RTE_ATOMIC_PPC_64_H_ */