/*
 * BSD LICENSE
 *
 * Copyright (C) IBM Corporation 2014.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of IBM Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2001 Benno Rice
 * Copyright (c) 2001 David E. O'Brien
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 */

#ifndef _RTE_ATOMIC_PPC_64_H_
#define _RTE_ATOMIC_PPC_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include "generic/rte_atomic.h"

/**
 * General memory barrier.
 *
 * Guarantees that the LOAD and STORE operations generated before the
 * barrier occur before the LOAD and STORE operations generated after.
 */
#define rte_mb() asm volatile("sync" : : : "memory")

/**
 * Write memory barrier.
 *
 * Guarantees that the STORE operations generated before the barrier
 * occur before the STORE operations generated after.
 */
#ifdef RTE_ARCH_64
#define rte_wmb() asm volatile("lwsync" : : : "memory")
#else
#define rte_wmb() asm volatile("sync" : : : "memory")
#endif

/**
 * Read memory barrier.
 *
 * Guarantees that the LOAD operations generated before the barrier
 * occur before the LOAD operations generated after.
 */
#ifdef RTE_ARCH_64
#define rte_rmb() asm volatile("lwsync" : : : "memory")
#else
#define rte_rmb() asm volatile("sync" : : : "memory")
#endif

#define rte_smp_mb() rte_mb()

#define rte_smp_wmb() rte_wmb()

#define rte_smp_rmb() rte_rmb()

#define rte_io_mb() rte_mb()

#define rte_io_wmb() rte_wmb()

#define rte_io_rmb() rte_rmb()

#define rte_cio_wmb() rte_wmb()

#define rte_cio_rmb() rte_rmb()

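/*
 * Usage sketch (illustrative only, not part of this header's API): a
 * minimal message-passing pairing of rte_smp_wmb()/rte_smp_rmb().  The
 * variables `payload` and `ready` are hypothetical.
 *
 *	static uint32_t payload;
 *	static volatile uint32_t ready;
 *
 *	void producer(void)
 *	{
 *		payload = 42;
 *		rte_smp_wmb();	// payload store is visible before flag store
 *		ready = 1;
 *	}
 *
 *	int consumer(void)
 *	{
 *		while (!ready)
 *			;
 *		rte_smp_rmb();	// flag load happens before payload load
 *		return payload;
 *	}
 */
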
/*------------------------- 16 bit atomic operations -------------------------*/
/* To be compatible with Power7, use GCC built-in functions for 16 bit
 * operations */

#ifndef RTE_FORCE_INTRINSICS
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
	return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
		__ATOMIC_ACQUIRE) ? 1 : 0;
}

static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
}

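/*
 * Usage sketch (illustrative only): the canonical compare-and-set retry
 * loop, here implementing a saturating 16-bit increment on a
 * hypothetical counter `cnt`.  cmpset returns non-zero on success, so
 * the loop re-reads and retries whenever another thread won the race.
 *
 *	static volatile uint16_t cnt;
 *
 *	void inc_saturating(void)
 *	{
 *		uint16_t old;
 *
 *		do {
 *			old = cnt;
 *			if (old == UINT16_MAX)
 *				return;		// already saturated
 *		} while (!rte_atomic16_cmpset(&cnt, old, old + 1));
 *	}
 */
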
/*------------------------- 32 bit atomic operations -------------------------*/

static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
	unsigned int ret = 0;

	asm volatile(
			/* lwsync orders prior accesses before the update */
			"\tlwsync\n"
			/* load word and reserve, then compare with exp */
			"1:\tlwarx %[ret], 0, %[dst]\n"
			"cmplw %[exp], %[ret]\n"
			"bne 2f\n"
			/* store conditionally; retry if the reservation was lost */
			"stwcx. %[src], 0, %[dst]\n"
			"bne- 1b\n"
			"li %[ret], 1\n"
			"b 3f\n"
			"2:\n"
			/* mismatch: store the old value back to clear the
			 * reservation and return 0 */
			"stwcx. %[ret], 0, %[dst]\n"
			"li %[ret], 0\n"
			"3:\n"
			/* isync gives acquire semantics on the exit path */
			"isync\n"
			: [ret] "=&r" (ret), "=m" (*dst)
			: [dst] "r" (dst),
			  [exp] "r" (exp),
			  [src] "r" (src),
			  "m" (*dst)
			: "cc", "memory");

	return ret;
}

static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
	return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
	int t;

	asm volatile(
			"1: lwarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],1\n"
			"stwcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
	int t;

	asm volatile(
			"1: lwarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],-1\n"
			"stwcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
	int ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: lwarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],1\n"
			"stwcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
	int ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: lwarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],-1\n"
			"stwcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
}

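/*
 * Usage sketch (illustrative only): rte_atomic32_exchange() atomically
 * swaps in a new value and returns the old one, which makes it handy
 * for claiming a pending-work flag exactly once.  `pending` and
 * process_work() are hypothetical.
 *
 *	static volatile uint32_t pending;
 *
 *	void poll_work(void)
 *	{
 *		// only one thread observes the 1 -> 0 transition
 *		if (rte_atomic32_exchange(&pending, 0) == 1)
 *			process_work();
 *	}
 */
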
/*------------------------- 64 bit atomic operations -------------------------*/

static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
	unsigned int ret = 0;

	/* Same lwsync/ldarx/stdcx./isync pattern as the 32-bit cmpset,
	 * operating on doublewords. */
	asm volatile (
			"\tlwsync\n"
			"1: ldarx %[ret], 0, %[dst]\n"
			"cmpld %[exp], %[ret]\n"
			"bne 2f\n"
			"stdcx. %[src], 0, %[dst]\n"
			"bne- 1b\n"
			"li %[ret], 1\n"
			"b 3f\n"
			"2:\n"
			"stdcx. %[ret], 0, %[dst]\n"
			"li %[ret], 0\n"
			"3:\n"
			"isync\n"
			: [ret] "=&r" (ret), "=m" (*dst)
			: [dst] "r" (dst),
			  [exp] "r" (exp),
			  [src] "r" (src),
			  "m" (*dst)
			: "cc", "memory");
	return ret;
}

static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
	v->cnt = 0;
}

static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
	long ret;

	asm volatile("ld%U1%X1 %[ret],%[cnt]"
		: [ret] "=r"(ret)
		: [cnt] "m"(v->cnt));

	return ret;
}

static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
	asm volatile("std%U0%X0 %[new_value],%[cnt]"
		: [cnt] "=m"(v->cnt)
		: [new_value] "r"(new_value));
}

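/*
 * Note (added commentary): aligned doubleword loads and stores are
 * single-copy atomic on 64-bit Power, so rte_atomic64_read() and
 * rte_atomic64_set() need no ldarx/stdcx. loop; a plain ld/std is
 * enough.  The %U/%X operand modifiers merely let GCC emit the
 * update/indexed addressing form that matches the "m" constraint.
 */
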
static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"add %[t],%[inc],%[t]\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
			: "cc", "memory");
}

static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"subf %[t],%[dec],%[t]\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "+m" (v->cnt)
			: [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
			: "cc", "memory");
}

static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],1\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "+m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],-1\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "+m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"add %[ret],%[inc],%[ret]\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [inc] "r" (inc), [cnt] "r" (&v->cnt)
			: "cc", "memory");

	return ret;
}

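/*
 * Usage sketch (illustrative only): because rte_atomic64_add_return()
 * hands back the post-increment value, each caller receives a unique
 * ticket without further synchronization.  `next_ticket` is a
 * hypothetical counter initialized with rte_atomic64_init().
 *
 *	static rte_atomic64_t next_ticket;
 *
 *	int64_t take_ticket(void)
 *	{
 *		// two racing callers can never see the same value
 *		return rte_atomic64_add_return(&next_ticket, 1);
 *	}
 */
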
static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"subf %[ret],%[dec],%[ret]\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [dec] "r" (dec), [cnt] "r" (&v->cnt)
			: "cc", "memory");

	return ret;
}

static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],1\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],-1\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

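/*
 * Usage sketch (illustrative only): rte_atomic64_dec_and_test() is the
 * classic reference-counting release primitive; exactly one caller
 * observes the transition to zero and performs the cleanup.  `refcnt`
 * and destroy_object() are hypothetical.
 *
 *	struct object {
 *		rte_atomic64_t refcnt;
 *		// ... payload ...
 *	};
 *
 *	void object_put(struct object *obj)
 *	{
 *		if (rte_atomic64_dec_and_test(&obj->refcnt))
 *			destroy_object(obj);
 *	}
 */
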
static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}

/**
 * Atomically set a 64-bit counter to 0.
 *
 * @param v
 *   A pointer to the atomic counter.
 */
static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
	v->cnt = 0;
}

static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	/* must be the 8-byte built-in for a 64-bit operand */
	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
}

#endif /* RTE_FORCE_INTRINSICS */

#ifdef __cplusplus
}
#endif

#endif /* _RTE_ATOMIC_PPC_64_H_ */