]>
Commit | Line | Data |
---|---|---|
f67539c2 TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright(c) 2010-2014 Intel Corporation. | |
7c673cae FG |
3 | */ |
4 | ||
5 | /* | |
6 | * Inspired from FreeBSD src/sys/amd64/include/atomic.h | |
7 | * Copyright (c) 1998 Doug Rabson | |
9f95a23c | 8 | * Copyright (c) 2019 Intel Corporation |
7c673cae FG |
9 | * All rights reserved. |
10 | */ | |
11 | ||
12 | #ifndef _RTE_ATOMIC_X86_H_ | |
13 | #error do not include this file directly, use <rte_atomic.h> instead | |
14 | #endif | |
15 | ||
16 | #ifndef _RTE_ATOMIC_X86_64_H_ | |
17 | #define _RTE_ATOMIC_X86_64_H_ | |
18 | ||
19 | #include <stdint.h> | |
20 | #include <rte_common.h> | |
9f95a23c | 21 | #include <rte_compat.h> |
7c673cae FG |
22 | #include <rte_atomic.h> |
23 | ||
24 | /*------------------------- 64 bit atomic operations -------------------------*/ | |
25 | ||
26 | #ifndef RTE_FORCE_INTRINSICS | |
/*
 * Atomic 64-bit compare-and-set.
 *
 * Compares *dst with exp; when they are equal, stores src into *dst.
 * Returns non-zero if the store happened, 0 otherwise.
 *
 * The "a" input/output constraints tie exp and res to RAX/AL as the
 * cmpxchgq instruction requires; sete captures ZF, which is set only
 * when the exchange succeeded. The "memory" clobber makes this a full
 * compiler barrier as well.
 */
static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
	uint8_t res;

	asm volatile(
			MPLOCKED		/* lock prefix for SMP safety */
			"cmpxchgq %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res),     /* output */
			  [dst] "=m" (*dst)
			: [src] "r" (src),      /* input */
			  "a" (exp),
			  "m" (*dst)
			: "memory");            /* no-clobber list */

	return res;
}
46 | ||
11fdf7f2 TL |
/*
 * Atomically exchange *dst with val; returns the previous value of *dst.
 *
 * NOTE(review): xchgq with a memory operand is implicitly locked on x86,
 * so the MPLOCKED prefix here is belt-and-braces -- harmless either way.
 */
static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	asm volatile(
			MPLOCKED
			"xchgq %0, %1;"
			: "=r" (val), "=m" (*dst)
			: "0" (val), "m" (*dst)
			: "memory"); /* no-clobber list */
	return val;
}
58 | ||
7c673cae FG |
/* Initialize the atomic counter to 0 (plain, non-locked store). */
static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
	v->cnt = 0;
}
64 | ||
/* Return the current counter value (plain load, no lock prefix). */
static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
	return v->cnt;
}
70 | ||
/* Set the counter to new_value (plain, non-locked store). */
static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
	v->cnt = new_value;
}
76 | ||
/*
 * Atomically add inc to the counter.
 *
 * The "ir" constraint lets the compiler pass inc as an immediate or a
 * register. Note there is no "memory" clobber, so unlike
 * rte_atomic64_cmpset this is not a compiler barrier.
 */
static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
	asm volatile(
			MPLOCKED
			"addq %[inc], %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: [inc] "ir" (inc),     /* input */
			  "m" (v->cnt)
			);
}
88 | ||
/*
 * Atomically subtract dec from the counter.
 * Mirror image of rte_atomic64_add; see the notes there (no "memory"
 * clobber -> not a compiler barrier).
 */
static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
	asm volatile(
			MPLOCKED
			"subq %[dec], %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: [dec] "ir" (dec),     /* input */
			  "m" (v->cnt)
			);
}
100 | ||
/* Atomically increment the counter by one (locked incq). */
static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
	asm volatile(
			MPLOCKED
			"incq %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
111 | ||
/* Atomically decrement the counter by one (locked decq). */
static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
	asm volatile(
			MPLOCKED
			"decq %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
122 | ||
/*
 * Atomically add inc to the counter and return the post-add value.
 *
 * xaddq stores the old value of v->cnt back into prev, so the result
 * after the add is prev + inc.
 */
static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
	int64_t prev = inc;

	asm volatile(
			MPLOCKED
			"xaddq %[prev], %[cnt]"
			: [prev] "+r" (prev),   /* output */
			  [cnt] "=m" (v->cnt)
			: "m" (v->cnt)          /* input */
			);
	return prev + inc;
}
137 | ||
138 | static inline int64_t | |
139 | rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec) | |
140 | { | |
141 | return rte_atomic64_add_return(v, -dec); | |
142 | } | |
143 | ||
/*
 * Atomically increment the counter; return non-zero (true) iff the
 * result is 0. sete captures ZF as set by the locked incq.
 */
static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"incq %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);

	return ret != 0;
}
158 | ||
/*
 * Atomically decrement the counter; return non-zero (true) iff the
 * result is 0. sete captures ZF as set by the locked decq.
 */
static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"decq %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
172 | ||
/*
 * Atomically set the counter from 0 to 1.
 * Returns non-zero if it succeeded (counter was 0), 0 otherwise.
 */
static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}
177 | ||
/* Reset the counter to 0 (plain, non-locked store). */
static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
	v->cnt = 0;
}
182 | #endif | |
183 | ||
9f95a23c TL |
184 | /*------------------------ 128 bit atomic operations -------------------------*/ |
185 | ||
f67539c2 TL |
/*
 * 128-bit atomic compare-and-exchange using cmpxchg16b.
 *
 * Compares the 16 bytes at *dst with *exp; if equal, stores *src into
 * *dst. On failure the current value of *dst is written back into *exp
 * (via the RDX:RAX outputs). Returns non-zero on success.
 *
 * The weak/success/failure memory-ordering arguments are accepted for
 * API compatibility but ignored (see RTE_SET_USED below): the locked
 * instruction is strongly ordered regardless.
 *
 * NOTE(review): cmpxchg16b requires the destination to be 16-byte
 * aligned -- presumably rte_int128_t is declared with that alignment;
 * verify in its definition.
 */
__rte_experimental
static inline int
rte_atomic128_cmp_exchange(rte_int128_t *dst,
			   rte_int128_t *exp,
			   const rte_int128_t *src,
			   unsigned int weak,
			   int success,
			   int failure)
{
	RTE_SET_USED(weak);
	RTE_SET_USED(success);
	RTE_SET_USED(failure);
	uint8_t res;

	asm volatile (
		      MPLOCKED
		      "cmpxchg16b %[dst];"
		      " sete %[res]"
		      : [dst] "=m" (dst->val[0]),
			"=a" (exp->val[0]),     /* old low 64 bits on failure */
			"=d" (exp->val[1]),     /* old high 64 bits on failure */
			[res] "=r" (res)
		      : "b" (src->val[0]),      /* desired low 64 bits */
			"c" (src->val[1]),      /* desired high 64 bits */
			"a" (exp->val[0]),      /* expected low 64 bits */
			"d" (exp->val[1]),      /* expected high 64 bits */
			"m" (dst->val[0])
		      : "memory");

	return res;
}
217 | ||
7c673cae | 218 | #endif /* _RTE_ATOMIC_X86_64_H_ */ |