/*-
 * BSD LICENSE
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inspired from FreeBSD src/sys/amd64/include/atomic.h
 * Copyright (c) 1998 Doug Rabson
 * Copyright (c) 2019 Intel Corporation
 * All rights reserved.
 */
40 | ||
41 | #ifndef _RTE_ATOMIC_X86_H_ | |
42 | #error do not include this file directly, use <rte_atomic.h> instead | |
43 | #endif | |
44 | ||
45 | #ifndef _RTE_ATOMIC_X86_64_H_ | |
46 | #define _RTE_ATOMIC_X86_64_H_ | |
47 | ||
48 | #include <stdint.h> | |
49 | #include <rte_common.h> | |
9f95a23c | 50 | #include <rte_compat.h> |
7c673cae FG |
51 | #include <rte_atomic.h> |
52 | ||
53 | /*------------------------- 64 bit atomic operations -------------------------*/ | |
54 | ||
55 | #ifndef RTE_FORCE_INTRINSICS | |
/**
 * Atomic 64-bit compare-and-set.
 *
 * If *dst equals exp, store src into *dst; otherwise leave *dst
 * unchanged.  Implemented with CMPXCHGQ, whose contract requires the
 * expected value in RAX (the "a" input) and which sets ZF on success;
 * SETE materializes ZF into res.
 *
 * MPLOCKED is expected to expand to the LOCK prefix on SMP builds --
 * defined in the enclosing arch header, not visible here.
 *
 * @param dst Destination 64-bit word, compared and conditionally updated.
 * @param exp Value *dst is expected to hold.
 * @param src Value written to *dst on a successful compare.
 * @return Non-zero if the swap happened, 0 otherwise.
 */
static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
	uint8_t res;

	asm volatile(
			MPLOCKED
			"cmpxchgq %[src], %[dst];"
			"sete %[res];"          /* res = 1 iff the exchange happened (ZF set) */
			: [res] "=a" (res),     /* outputs: RAX reused for the flag byte */
			[dst] "=m" (*dst)
			: [src] "r" (src),      /* inputs */
			"a" (exp),              /* CMPXCHG compares *dst against RAX */
			"m" (*dst)
			: "memory");            /* clobbers: full compiler barrier */

	return res;
}
75 | ||
11fdf7f2 TL |
/**
 * Atomically swap the 64-bit value at *dst with val.
 *
 * Uses XCHGQ on a memory operand; the "0" input constraint ties val to
 * the same register as output %0, so the previous contents of *dst are
 * returned in val's place.
 *
 * @param dst Location whose value is replaced.
 * @param val New value to store.
 * @return The value *dst held before the swap.
 */
static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	asm volatile(
			MPLOCKED
			"xchgq %0, %1;"
			: "=r" (val), "=m" (*dst)
			: "0" (val), "m" (*dst)
			: "memory");            /* clobbers: full compiler barrier */
	return val;
}
87 | ||
7c673cae FG |
/**
 * Initialize the atomic counter to 0.
 *
 * Plain (non-RMW) store; relies on aligned 64-bit stores being atomic
 * on x86-64.  Intended for use before the counter is shared.
 */
static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
	v->cnt = 0;
}
93 | ||
/**
 * Read the current counter value.
 *
 * Plain load; aligned 64-bit loads are atomic on x86-64, but no
 * ordering beyond that is implied.
 */
static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
	return v->cnt;
}
99 | ||
/**
 * Set the counter to new_value.
 *
 * Plain store; atomic only in the sense that aligned 64-bit stores
 * cannot tear on x86-64.  Not a read-modify-write and not a barrier.
 */
static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
	v->cnt = new_value;
}
105 | ||
/**
 * Atomically add inc to the counter.
 *
 * MPLOCKED-prefixed ADDQ directly on the memory operand; the result is
 * discarded (use rte_atomic64_add_return() to observe it).
 *
 * Note: no "memory" clobber is declared, so this orders only the
 * counter word itself -- it is not a compiler barrier.
 */
static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
	asm volatile(
			MPLOCKED
			"addq %[inc], %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: [inc] "ir" (inc),     /* input: immediate or register */
			"m" (v->cnt)
			);
}
117 | ||
/**
 * Atomically subtract dec from the counter.
 *
 * MPLOCKED-prefixed SUBQ on the memory operand; result discarded
 * (use rte_atomic64_sub_return() to observe it).
 *
 * Note: no "memory" clobber -- not a compiler barrier.
 */
static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
	asm volatile(
			MPLOCKED
			"subq %[dec], %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: [dec] "ir" (dec),     /* input: immediate or register */
			"m" (v->cnt)
			);
}
129 | ||
/**
 * Atomically increment the counter by one.
 *
 * MPLOCKED-prefixed INCQ on the memory operand.  No "memory" clobber,
 * so this is not a compiler barrier.
 */
static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
	asm volatile(
			MPLOCKED
			"incq %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
140 | ||
/**
 * Atomically decrement the counter by one.
 *
 * MPLOCKED-prefixed DECQ on the memory operand.  No "memory" clobber,
 * so this is not a compiler barrier.
 */
static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
	asm volatile(
			MPLOCKED
			"decq %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
151 | ||
/**
 * Atomically add inc to the counter and return the post-add value.
 *
 * XADDQ exchanges the register with the memory operand while adding,
 * leaving the counter's previous value in prev; the new value is then
 * reconstructed as prev + inc.
 *
 * @return The counter value after the addition.
 */
static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
	int64_t prev = inc;

	asm volatile(
			MPLOCKED
			"xaddq %[prev], %[cnt]"
			: [prev] "+r" (prev),   /* in/out: inc in, old counter value out */
			[cnt] "=m" (v->cnt)
			: "m" (v->cnt)          /* input */
			);
	return prev + inc;
}
166 | ||
167 | static inline int64_t | |
168 | rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec) | |
169 | { | |
170 | return rte_atomic64_add_return(v, -dec); | |
171 | } | |
172 | ||
/**
 * Atomically increment the counter and test the result.
 *
 * INCQ sets ZF when the incremented value is zero; SETE captures that
 * flag into ret.
 *
 * @return Non-zero iff the counter is 0 after the increment.
 */
static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"incq %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* in/out: the counter word */
			[ret] "=qm" (ret)       /* flag byte (byte-addressable reg or mem) */
			);

	return ret != 0;
}
187 | ||
/**
 * Atomically decrement the counter and test the result.
 *
 * DECQ sets ZF when the decremented value is zero; SETE captures that
 * flag into ret.
 *
 * @return Non-zero iff the counter is 0 after the decrement.
 */
static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"decq %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* in/out: the counter word */
			[ret] "=qm" (ret)       /* flag byte (byte-addressable reg or mem) */
			);
	return ret != 0;
}
201 | ||
202 | static inline int rte_atomic64_test_and_set(rte_atomic64_t *v) | |
203 | { | |
204 | return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1); | |
205 | } | |
206 | ||
/**
 * Reset the counter to 0.
 *
 * Plain store, same caveats as rte_atomic64_set(): no RMW, no barrier.
 */
static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
	v->cnt = 0;
}
211 | #endif | |
212 | ||
/*------------------------ 128 bit atomic operations -------------------------*/
/**
 * 128-bit integer structure.
 *
 * Two views of the same 16 bytes: a pair of 64-bit halves and (where
 * the compiler supports it) a native __int128.  In the 128-bit CAS
 * below, val[0] feeds RAX/RBX and val[1] feeds RDX/RCX, i.e. val[0] is
 * the low quadword on this little-endian target.
 */
RTE_STD_C11
typedef struct {
	RTE_STD_C11
	union {
		uint64_t val[2];                /* val[0] = low half, val[1] = high half */
		__extension__ __int128 int128;  /* GCC/Clang 128-bit view of the same storage */
	};
} __rte_aligned(16) rte_int128_t;               /* CMPXCHG16B requires 16-byte alignment */
226 | ||
/**
 * Atomic 128-bit compare-and-exchange.
 *
 * If *dst equals *exp, store *src into *dst and return non-zero.
 * On failure, the value actually observed in *dst is written back into
 * *exp (via the "=a"/"=d" outputs) and 0 is returned.
 *
 * The weak/success/failure parameters mirror the C11-style API but are
 * ignored: the LOCK CMPXCHG16B implementation is always a strong CAS
 * with full-barrier semantics.
 *
 * @param dst Destination; must be 16-byte aligned (see rte_int128_t).
 * @param exp Expected value in, observed value out on failure.
 * @param src Replacement value stored on success.
 * @return Non-zero on success, 0 on failure.
 */
static inline int __rte_experimental
rte_atomic128_cmp_exchange(rte_int128_t *dst,
			   rte_int128_t *exp,
			   const rte_int128_t *src,
			   unsigned int weak,
			   int success,
			   int failure)
{
	/* x86 provides only the strong, sequentially consistent form. */
	RTE_SET_USED(weak);
	RTE_SET_USED(success);
	RTE_SET_USED(failure);
	uint8_t res;

	asm volatile (
		      MPLOCKED
		      "cmpxchg16b %[dst];"
		      " sete %[res]"
		      : [dst] "=m" (dst->val[0]),
			"=a" (exp->val[0]),     /* observed low half on failure */
			"=d" (exp->val[1]),     /* observed high half on failure */
			[res] "=r" (res)
		      : "b" (src->val[0]),      /* RBX:RCX = replacement value */
			"c" (src->val[1]),
			"a" (exp->val[0]),      /* RAX:RDX = expected value */
			"d" (exp->val[1]),
			"m" (dst->val[0])
		      : "memory");

	return res;
}
257 | ||
7c673cae | 258 | #endif /* _RTE_ATOMIC_X86_64_H_ */ |