/*
 *   BSD LICENSE
 *
 *   Copyright (C) IBM Corporation 2014.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IBM Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inspired by FreeBSD src/sys/powerpc/include/atomic.h
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2001 Benno Rice
 * Copyright (c) 2001 David E. O'Brien
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 */

#ifndef _RTE_ATOMIC_PPC_64_H_
#define _RTE_ATOMIC_PPC_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include "generic/rte_atomic.h"

/**
 * General memory barrier.
 *
 * Guarantees that the LOAD and STORE operations generated before the
 * barrier occur before the LOAD and STORE operations generated after.
 */
#define rte_mb()  { asm volatile("sync" : : : "memory"); }

/**
 * Write memory barrier.
 *
 * Guarantees that the STORE operations generated before the barrier
 * occur before the STORE operations generated after.
 */
#ifdef RTE_ARCH_64
#define rte_wmb() { asm volatile("lwsync" : : : "memory"); }
#else
#define rte_wmb() { asm volatile("sync" : : : "memory"); }
#endif

/**
 * Read memory barrier.
 *
 * Guarantees that the LOAD operations generated before the barrier
 * occur before the LOAD operations generated after.
 */
#ifdef RTE_ARCH_64
#define rte_rmb() { asm volatile("lwsync" : : : "memory"); }
#else
#define rte_rmb() { asm volatile("sync" : : : "memory"); }
#endif

#define rte_smp_mb() rte_mb()

#define rte_smp_wmb() rte_wmb()

#define rte_smp_rmb() rte_rmb()

#define rte_io_mb() rte_mb()

#define rte_io_wmb() rte_wmb()

#define rte_io_rmb() rte_rmb()

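/*
 * Illustrative sketch, not part of the DPDK API: a single-producer,
 * single-consumer handoff showing how rte_smp_wmb() and rte_smp_rmb()
 * pair up. The rte_ppc64_example_* names are hypothetical.
 */
static inline void
rte_ppc64_example_publish(volatile uint64_t *payload,
        volatile uint32_t *ready, uint64_t value)
{
        *payload = value;
        rte_smp_wmb();  /* order the payload store before the flag store */
        *ready = 1;
}

static inline uint64_t
rte_ppc64_example_consume(volatile uint64_t *payload,
        volatile uint32_t *ready)
{
        while (*ready == 0)
                ;       /* spin until the producer sets the flag */
        rte_smp_rmb();  /* order the flag load before the payload load */
        return *payload;
}
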
/*------------------------- 16 bit atomic operations -------------------------*/
/* To remain compatible with Power7, use the GCC built-in atomics for the
 * 16-bit operations.
 */

#ifndef RTE_FORCE_INTRINSICS
/* Compare *dst with exp; if equal, write src and return 1, else return 0. */
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
        return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
                __ATOMIC_ACQUIRE) ? 1 : 0;
}

static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
        return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
        __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
        __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
        return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
        return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

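/*
 * Illustrative sketch, not part of the DPDK API: the usual cmpset retry
 * loop, here raising a 16-bit high-water mark. rte_ppc64_example_max16
 * is a hypothetical name.
 */
static inline void
rte_ppc64_example_max16(rte_atomic16_t *v, uint16_t candidate)
{
        uint16_t cur;

        do {
                cur = (uint16_t)v->cnt;	/* snapshot the current value */
                if (cur >= candidate)
                        return;         /* already at least as large */
                /* retry if another thread updated *v since the snapshot */
        } while (rte_atomic16_cmpset((volatile uint16_t *)&v->cnt,
                        cur, candidate) == 0);
}
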
/*------------------------- 32 bit atomic operations -------------------------*/

static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
        unsigned int ret = 0;

        asm volatile(
                        "\tlwsync\n"
                        "1:\tlwarx %[ret], 0, %[dst]\n"
                        "cmplw %[exp], %[ret]\n"
                        "bne 2f\n"
                        "stwcx. %[src], 0, %[dst]\n"
                        "bne- 1b\n"
                        "li %[ret], 1\n"
                        "b 3f\n"
                        "2:\n"
                        /* Mismatch: store back the value just loaded; this
                         * clears the reservation without modifying *dst. */
                        "stwcx. %[ret], 0, %[dst]\n"
                        "li %[ret], 0\n"
                        "3:\n"
                        "isync\n"
                        : [ret] "=&r" (ret), "=m" (*dst)
                        : [dst] "r" (dst),
                          [exp] "r" (exp),
                          [src] "r" (src),
                          "m" (*dst)
                        : "cc", "memory");

        return ret;
}

static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
        return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
        int t;

        asm volatile(
                        "1: lwarx %[t],0,%[cnt]\n"
                        "addic %[t],%[t],1\n"
                        "stwcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "=m" (v->cnt)
                        : [cnt] "r" (&v->cnt), "m" (v->cnt)
                        : "cc", "xer", "memory");
}

static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
        int t;

        asm volatile(
                        "1: lwarx %[t],0,%[cnt]\n"
                        "addic %[t],%[t],-1\n"
                        "stwcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "=m" (v->cnt)
                        : [cnt] "r" (&v->cnt), "m" (v->cnt)
                        : "cc", "xer", "memory");
}

static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
        int ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: lwarx %[ret],0,%[cnt]\n"
                        "addic %[ret],%[ret],1\n"
                        "stwcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");

        return ret == 0;
}

static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
        int ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: lwarx %[ret],0,%[cnt]\n"
                        "addic %[ret],%[ret],-1\n"
                        "stwcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");

        return ret == 0;
}
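
/*
 * Illustrative sketch, not part of the DPDK API: a reference-counted
 * object freed by whichever thread drops the last reference. The
 * rte_ppc64_example_obj type and its fields are hypothetical.
 */
struct rte_ppc64_example_obj {
        rte_atomic32_t refcnt;
        void (*free_cb)(struct rte_ppc64_example_obj *obj);
};

static inline void
rte_ppc64_example_obj_put(struct rte_ppc64_example_obj *obj)
{
        /* dec_and_test returns non-zero only when the counter hits zero */
        if (rte_atomic32_dec_and_test(&obj->refcnt))
                obj->free_cb(obj);
}
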
/*------------------------- 64 bit atomic operations -------------------------*/

static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
        unsigned int ret = 0;

        asm volatile (
                        "\tlwsync\n"
                        "1: ldarx %[ret], 0, %[dst]\n"
                        "cmpld %[exp], %[ret]\n"
                        "bne 2f\n"
                        "stdcx. %[src], 0, %[dst]\n"
                        "bne- 1b\n"
                        "li %[ret], 1\n"
                        "b 3f\n"
                        "2:\n"
                        /* Mismatch: store back the value just loaded; this
                         * clears the reservation without modifying *dst. */
                        "stdcx. %[ret], 0, %[dst]\n"
                        "li %[ret], 0\n"
                        "3:\n"
                        "isync\n"
                        : [ret] "=&r" (ret), "=m" (*dst)
                        : [dst] "r" (dst),
                          [exp] "r" (exp),
                          [src] "r" (src),
                          "m" (*dst)
                        : "cc", "memory");
        return ret;
}

static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
        v->cnt = 0;
}

static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
        long ret;

        asm volatile("ld%U1%X1 %[ret],%[cnt]"
                : [ret] "=r" (ret)
                : [cnt] "m" (v->cnt));

        return ret;
}

static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
        asm volatile("std%U0%X0 %[new_value],%[cnt]"
                : [cnt] "=m" (v->cnt)
                : [new_value] "r" (new_value));
}

static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
        long t;

        asm volatile(
                        "1: ldarx %[t],0,%[cnt]\n"
                        "add %[t],%[inc],%[t]\n"
                        "stdcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "=m" (v->cnt)
                        : [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
                        : "cc", "memory");
}

static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
        long t;

        asm volatile(
                        "1: ldarx %[t],0,%[cnt]\n"
                        "subf %[t],%[dec],%[t]\n"
                        "stdcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "+m" (v->cnt)
                        : [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
                        : "cc", "memory");
}

static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
        long t;

        asm volatile(
                        "1: ldarx %[t],0,%[cnt]\n"
                        "addic %[t],%[t],1\n"
                        "stdcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "+m" (v->cnt)
                        : [cnt] "r" (&v->cnt), "m" (v->cnt)
                        : "cc", "xer", "memory");
}

static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
        long t;

        asm volatile(
                        "1: ldarx %[t],0,%[cnt]\n"
                        "addic %[t],%[t],-1\n"
                        "stdcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "+m" (v->cnt)
                        : [cnt] "r" (&v->cnt), "m" (v->cnt)
                        : "cc", "xer", "memory");
}

static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
        long ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: ldarx %[ret],0,%[cnt]\n"
                        "add %[ret],%[inc],%[ret]\n"
                        "stdcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [inc] "r" (inc), [cnt] "r" (&v->cnt)
                        : "cc", "memory");

        return ret;
}

static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
{
        long ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: ldarx %[ret],0,%[cnt]\n"
                        "subf %[ret],%[dec],%[ret]\n"
                        "stdcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [dec] "r" (dec), [cnt] "r" (&v->cnt)
                        : "cc", "memory");

        return ret;
}

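/*
 * Illustrative sketch, not part of the DPDK API: handing out unique,
 * monotonically increasing sequence numbers. rte_ppc64_example_next_seq
 * is a hypothetical name.
 */
static inline int64_t
rte_ppc64_example_next_seq(rte_atomic64_t *seq)
{
        /* add_return yields the post-increment value, so concurrent
         * callers each observe a distinct number. */
        return rte_atomic64_add_return(seq, 1);
}
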
static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
        long ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: ldarx %[ret],0,%[cnt]\n"
                        "addic %[ret],%[ret],1\n"
                        "stdcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");

        return ret == 0;
}

static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
        long ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: ldarx %[ret],0,%[cnt]\n"
                        "addic %[ret],%[ret],-1\n"
                        "stdcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");

        return ret == 0;
}

static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
        return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}

/**
 * Atomically set a 64-bit counter to 0.
 *
 * @param v
 *   A pointer to the atomic counter.
 */
static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
        v->cnt = 0;
}
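
/*
 * Illustrative sketch, not part of the DPDK API: a toy spin-flag built
 * from test_and_set/clear. The rte_ppc64_example_lock/_unlock names are
 * hypothetical; rte_spinlock_t is the real primitive to use instead.
 */
static inline void
rte_ppc64_example_lock(rte_atomic64_t *flag)
{
        /* test_and_set returns non-zero only for the caller that flips
         * the counter from 0 to 1 */
        while (rte_atomic64_test_and_set(flag) == 0)
                ;       /* spin until the flag is acquired */
}

static inline void
rte_ppc64_example_unlock(rte_atomic64_t *flag)
{
        rte_smp_mb();   /* keep critical-section accesses before the release */
        rte_atomic64_clear(flag);
}
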
#endif

#ifdef __cplusplus
}
#endif

#endif /* _RTE_ATOMIC_PPC_64_H_ */